1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/archiveBuilder.hpp"
26 #include "cds/archiveUtils.inline.hpp"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "metaprogramming/primitiveConversions.hpp"
52 #include "oops/access.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/inlineKlass.inline.hpp"
55 #include "oops/klass.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "prims/forte.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/nativeLookup.hpp"
65 #include "runtime/arguments.hpp"
66 #include "runtime/atomicAccess.hpp"
67 #include "runtime/basicLock.inline.hpp"
68 #include "runtime/frame.inline.hpp"
69 #include "runtime/handles.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/interfaceSupport.inline.hpp"
72 #include "runtime/java.hpp"
73 #include "runtime/javaCalls.hpp"
74 #include "runtime/jniHandles.inline.hpp"
75 #include "runtime/perfData.hpp"
76 #include "runtime/sharedRuntime.hpp"
77 #include "runtime/signature.hpp"
78 #include "runtime/stackWatermarkSet.hpp"
79 #include "runtime/stubRoutines.hpp"
80 #include "runtime/synchronizer.inline.hpp"
81 #include "runtime/timerTrace.hpp"
82 #include "runtime/vframe.inline.hpp"
83 #include "runtime/vframeArray.hpp"
84 #include "runtime/vm_version.hpp"
85 #include "utilities/copy.hpp"
86 #include "utilities/dtrace.hpp"
87 #include "utilities/events.hpp"
88 #include "utilities/globalDefinitions.hpp"
89 #include "utilities/hashTable.hpp"
90 #include "utilities/macros.hpp"
91 #include "utilities/xmlstream.hpp"
92 #ifdef COMPILER1
93 #include "c1/c1_Runtime1.hpp"
94 #endif
95 #if INCLUDE_JFR
96 #include "jfr/jfr.inline.hpp"
97 #endif
98
99 // Shared runtime stub routines reside in their own unique blob with a
100 // single entry point
101
102
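// Define the static blob field for each shared runtime stub listed in
// SHARED_STUBS_DO (the declarations live in the SharedRuntime class).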
103 #define SHARED_STUB_FIELD_DEFINE(name, type) \
104 type* SharedRuntime::BLOB_FIELD_NAME(name);
105 SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
106 #undef SHARED_STUB_FIELD_DEFINE
107
108 nmethod* SharedRuntime::_cont_doYield_stub;
109
110 #if 0
111 // TODO tweak global stub name generation to match this
112 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
113 const char *SharedRuntime::_stub_names[] = {
114 SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
115 };
116 #endif
117
118 //----------------------------generate_stubs-----------------------------------
119 void SharedRuntime::generate_initial_stubs() {
120 // Build this early so it's available for the interpreter.
121 _throw_StackOverflowError_blob =
122 generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
123 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
124 }
125
126 void SharedRuntime::generate_stubs() {
127 _wrong_method_blob =
128 generate_resolve_blob(StubId::shared_wrong_method_id,
129 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
130 _wrong_method_abstract_blob =
131 generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
132 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
133 _ic_miss_blob =
134 generate_resolve_blob(StubId::shared_ic_miss_id,
135 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
136 _resolve_opt_virtual_call_blob =
137 generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
138 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
139 _resolve_virtual_call_blob =
140 generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
141 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
142 _resolve_static_call_blob =
143 generate_resolve_blob(StubId::shared_resolve_static_call_id,
144 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
145
146 _throw_delayed_StackOverflowError_blob =
147 generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
148 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
149
150 _throw_AbstractMethodError_blob =
151 generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
152 CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
153
154 _throw_IncompatibleClassChangeError_blob =
155 generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
156 CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
157
158 _throw_NullPointerException_at_call_blob =
159 generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
160 CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
161
162 #if COMPILER2_OR_JVMCI
163 // Vectors are generated only by C2 and JVMCI.
164 bool support_wide = is_wide_vector(MaxVectorSize);
165 if (support_wide) {
166 _polling_page_vectors_safepoint_handler_blob =
167 generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
168 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
169 }
170 #endif // COMPILER2_OR_JVMCI
171 _polling_page_safepoint_handler_blob =
172 generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
173 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
174 _polling_page_return_handler_blob =
175 generate_handler_blob(StubId::shared_polling_page_return_handler_id,
176 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
177
178 generate_deopt_blob();
179 }
180
181 void SharedRuntime::init_adapter_library() {
182 AdapterHandlerLibrary::initialize();
183 }
184
185 #if INCLUDE_JFR
186 //------------------------------generate jfr runtime stubs ------
187 void SharedRuntime::generate_jfr_stubs() {
188 ResourceMark rm;
189 const char* timer_msg = "SharedRuntime generate_jfr_stubs";
190 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
191
192 _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
193 _jfr_return_lease_blob = generate_jfr_return_lease();
194 }
195
196 #endif // INCLUDE_JFR
197
198 #include <math.h>
199
200 // Implementation of SharedRuntime
201
202 #ifndef PRODUCT
203 // For statistics
204 uint SharedRuntime::_ic_miss_ctr = 0;
205 uint SharedRuntime::_wrong_method_ctr = 0;
206 uint SharedRuntime::_resolve_static_ctr = 0;
207 uint SharedRuntime::_resolve_virtual_ctr = 0;
208 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
209 uint SharedRuntime::_implicit_null_throws = 0;
210 uint SharedRuntime::_implicit_div0_throws = 0;
211
212 int64_t SharedRuntime::_nof_normal_calls = 0;
213 int64_t SharedRuntime::_nof_inlined_calls = 0;
214 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
215 int64_t SharedRuntime::_nof_static_calls = 0;
216 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
217 int64_t SharedRuntime::_nof_interface_calls = 0;
218 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
219
220 uint SharedRuntime::_new_instance_ctr=0;
221 uint SharedRuntime::_new_array_ctr=0;
222 uint SharedRuntime::_multi2_ctr=0;
223 uint SharedRuntime::_multi3_ctr=0;
224 uint SharedRuntime::_multi4_ctr=0;
225 uint SharedRuntime::_multi5_ctr=0;
226 uint SharedRuntime::_mon_enter_stub_ctr=0;
227 uint SharedRuntime::_mon_exit_stub_ctr=0;
228 uint SharedRuntime::_mon_enter_ctr=0;
229 uint SharedRuntime::_mon_exit_ctr=0;
230 uint SharedRuntime::_partial_subtype_ctr=0;
231 uint SharedRuntime::_jbyte_array_copy_ctr=0;
232 uint SharedRuntime::_jshort_array_copy_ctr=0;
233 uint SharedRuntime::_jint_array_copy_ctr=0;
234 uint SharedRuntime::_jlong_array_copy_ctr=0;
235 uint SharedRuntime::_oop_array_copy_ctr=0;
236 uint SharedRuntime::_checkcast_array_copy_ctr=0;
237 uint SharedRuntime::_unsafe_array_copy_ctr=0;
238 uint SharedRuntime::_generic_array_copy_ctr=0;
239 uint SharedRuntime::_slow_array_copy_ctr=0;
240 uint SharedRuntime::_find_handler_ctr=0;
241 uint SharedRuntime::_rethrow_ctr=0;
242 uint SharedRuntime::_unsafe_set_memory_ctr=0;
243
244 int SharedRuntime::_ICmiss_index = 0;
245 int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
246 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
247
248
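// Records the call site of each inline-cache miss in a small fixed-size table
// (linear search). Once the table is full, every additional new site reuses the
// last slot. The data is only consumed by print_ic_miss_histogram() below when
// ICMissHistogram is enabled.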
249 void SharedRuntime::trace_ic_miss(address at) {
250 for (int i = 0; i < _ICmiss_index; i++) {
251 if (_ICmiss_at[i] == at) {
252 _ICmiss_count[i]++;
253 return;
254 }
255 }
256 int index = _ICmiss_index++;
257 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
258 _ICmiss_at[index] = at;
259 _ICmiss_count[index] = 1;
260 }
261
262 void SharedRuntime::print_ic_miss_histogram() {
263 if (ICMissHistogram) {
264 tty->print_cr("IC Miss Histogram:");
265 int tot_misses = 0;
266 for (int i = 0; i < _ICmiss_index; i++) {
267 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
268 tot_misses += _ICmiss_count[i];
269 }
270 tty->print_cr("Total IC misses: %7d", tot_misses);
271 }
272 }
273
274 #ifdef COMPILER2
275 // Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
276 void SharedRuntime::debug_print_value(jboolean x) {
277 tty->print_cr("boolean %d", x);
278 }
279
280 void SharedRuntime::debug_print_value(jbyte x) {
281 tty->print_cr("byte %d", x);
282 }
283
284 void SharedRuntime::debug_print_value(jshort x) {
285 tty->print_cr("short %d", x);
286 }
287
288 void SharedRuntime::debug_print_value(jchar x) {
289 tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
290 }
291
292 void SharedRuntime::debug_print_value(jint x) {
293 tty->print_cr("int %d", x);
294 }
295
296 void SharedRuntime::debug_print_value(jlong x) {
297 tty->print_cr("long " JLONG_FORMAT, x);
298 }
299
300 void SharedRuntime::debug_print_value(jfloat x) {
301 tty->print_cr("float %f", x);
302 }
303
304 void SharedRuntime::debug_print_value(jdouble x) {
305 tty->print_cr("double %lf", x);
306 }
307
308 void SharedRuntime::debug_print_value(oopDesc* x) {
309 x->print();
310 }
311 #endif // COMPILER2
312
313 #endif // PRODUCT
314
315
316 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
317 return x * y;
318 JRT_END
319
320
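// Note the argument order: y is the divisor and x the dividend. The JLS requires
// Long.MIN_VALUE / -1 to wrap around to Long.MIN_VALUE, but the corresponding C++
// division would overflow (undefined behavior, and a hardware fault on x86), so
// that case is handled explicitly here and in lrem() below.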
321 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
322 if (x == min_jlong && y == CONST64(-1)) {
323 return x;
324 } else {
325 return x / y;
326 }
327 JRT_END
328
329
330 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
331 if (x == min_jlong && y == CONST64(-1)) {
332 return 0;
333 } else {
334 return x % y;
335 }
336 JRT_END
337
338
339 #ifdef _WIN64
340 const juint float_sign_mask = 0x7FFFFFFF;
341 const juint float_infinity = 0x7F800000;
342 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
343 const julong double_infinity = CONST64(0x7FF0000000000000);
344 #endif
345
346 #if !defined(X86)
347 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
348 #ifdef _WIN64
349 // 64-bit Windows on amd64 returns the wrong values for
350 // infinity operands.
351 juint xbits = PrimitiveConversions::cast<juint>(x);
352 juint ybits = PrimitiveConversions::cast<juint>(y);
353 // x Mod Infinity == x unless x is infinity
354 if (((xbits & float_sign_mask) != float_infinity) &&
355 ((ybits & float_sign_mask) == float_infinity) ) {
356 return x;
357 }
358 return ((jfloat)fmod_winx64((double)x, (double)y));
359 #else
360 return ((jfloat)fmod((double)x,(double)y));
361 #endif
362 JRT_END
363
364 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
365 #ifdef _WIN64
366 julong xbits = PrimitiveConversions::cast<julong>(x);
367 julong ybits = PrimitiveConversions::cast<julong>(y);
368 // x Mod Infinity == x unless x is infinity
369 if (((xbits & double_sign_mask) != double_infinity) &&
370 ((ybits & double_sign_mask) == double_infinity) ) {
371 return x;
372 }
373 return ((jdouble)fmod_winx64((double)x, (double)y));
374 #else
375 return ((jdouble)fmod((double)x,(double)y));
376 #endif
377 JRT_END
378 #endif // !X86
379
380 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
381 return (jfloat)x;
382 JRT_END
383
384 #ifdef __SOFTFP__
385 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
386 return x + y;
387 JRT_END
388
389 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
390 return x - y;
391 JRT_END
392
393 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
394 return x * y;
395 JRT_END
396
397 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
398 return x / y;
399 JRT_END
400
401 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
402 return x + y;
403 JRT_END
404
405 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
406 return x - y;
407 JRT_END
408
409 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
410 return x * y;
411 JRT_END
412
413 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
414 return x / y;
415 JRT_END
416
417 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
418 return (jdouble)x;
419 JRT_END
420
421 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
422 return (jdouble)x;
423 JRT_END
424
425 JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
426 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
427 JRT_END
428
429 JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
430 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
431 JRT_END
432
433 JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
434 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
435 JRT_END
436
437 JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
438 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
439 JRT_END
440
441 // Functions to return the opposite of the aeabi functions for nan.
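// Each helper returns 1 when the relation holds or when either operand is NaN
// (i.e. an unordered comparison is treated as true), which is the inverse of the
// aeabi helpers' behavior of returning 0 for NaN operands.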
442 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
443 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
444 JRT_END
445
446 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
447 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
448 JRT_END
449
450 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
451 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
452 JRT_END
453
454 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
455 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
456 JRT_END
457
458 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
459 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
460 JRT_END
461
462 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
463 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
464 JRT_END
465
466 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
467 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
468 JRT_END
469
470 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
471 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
472 JRT_END
473
474 // Intrinsics make gcc generate code for these.
475 float SharedRuntime::fneg(float f) {
476 return -f;
477 }
478
479 double SharedRuntime::dneg(double f) {
480 return -f;
481 }
482
483 #endif // __SOFTFP__
484
485 #if defined(__SOFTFP__) || defined(E500V2)
486 // Intrinsics make gcc generate code for these.
487 double SharedRuntime::dabs(double f) {
488 return (f <= (double)0.0) ? (double)0.0 - f : f;
489 }
490
491 #endif
492
493 #if defined(__SOFTFP__)
494 double SharedRuntime::dsqrt(double f) {
495 return sqrt(f);
496 }
497 #endif
498
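// Java's float/double to int/long conversions saturate (JLS 5.1.3): NaN converts
// to 0 and out-of-range values clamp to the minimum/maximum of the target type.
// A plain C++ cast is undefined for such inputs, hence the explicit checks below.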
499 JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
500 if (g_isnan(x))
501 return 0;
502 if (x >= (jfloat) max_jint)
503 return max_jint;
504 if (x <= (jfloat) min_jint)
505 return min_jint;
506 return (jint) x;
507 JRT_END
508
509
510 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
511 if (g_isnan(x))
512 return 0;
513 if (x >= (jfloat) max_jlong)
514 return max_jlong;
515 if (x <= (jfloat) min_jlong)
516 return min_jlong;
517 return (jlong) x;
518 JRT_END
519
520
521 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
522 if (g_isnan(x))
523 return 0;
524 if (x >= (jdouble) max_jint)
525 return max_jint;
526 if (x <= (jdouble) min_jint)
527 return min_jint;
528 return (jint) x;
529 JRT_END
530
531
532 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
533 if (g_isnan(x))
534 return 0;
535 if (x >= (jdouble) max_jlong)
536 return max_jlong;
537 if (x <= (jdouble) min_jlong)
538 return min_jlong;
539 return (jlong) x;
540 JRT_END
541
542
543 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
544 return (jfloat)x;
545 JRT_END
546
547
548 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
549 return (jfloat)x;
550 JRT_END
551
552
553 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
554 return (jdouble)x;
555 JRT_END
556
557
558 // Exception handling across interpreter/compiler boundaries
559 //
560 // exception_handler_for_return_address(...) returns the continuation address.
561 // The continuation address is the entry point of the exception handler of the
562 // previous frame depending on the return address.
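//
// Depending on what the return address identifies, the handler returned is one of:
// the continuation return-barrier exception stub, the deopt blob's
// unpack_with_exception entry, the nmethod's exception handler, the call stub's
// catch_exception entry, the upcall stub exception handler, or the interpreter's
// rethrow entry.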
563
564 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
565 // Note: This is called when we have unwound the frame of the callee that did
566 // throw an exception. So far, no check has been performed by the StackWatermarkSet.
567 // Notably, the stack is not walkable at this point, and hence the check must
// be deferred until later. Specifically, any of the handlers returned by this
// function will get dispatched to, and they perform the deferred check via
// StackWatermarkSet::after_unwind at a point where the stack is walkable.
571 assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
572 assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
573
574 #if INCLUDE_JVMCI
575 // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
576 // and other exception handler continuations do not read it
577 current->set_exception_pc(nullptr);
578 #endif // INCLUDE_JVMCI
579
580 if (Continuation::is_return_barrier_entry(return_address)) {
581 return StubRoutines::cont_returnBarrierExc();
582 }
583
584 // The fastest case first
585 CodeBlob* blob = CodeCache::find_blob(return_address);
586 nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
587 if (nm != nullptr) {
588 // native nmethods don't have exception handlers
589 assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
590 assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
591 if (nm->is_deopt_pc(return_address)) {
592 // If we come here because of a stack overflow, the stack may be
593 // unguarded. Reguard the stack otherwise if we return to the
594 // deopt blob and the stack bang causes a stack overflow we
595 // crash.
596 StackOverflow* overflow_state = current->stack_overflow_state();
597 bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
598 if (overflow_state->reserved_stack_activation() != current->stack_base()) {
599 overflow_state->set_reserved_stack_activation(current->stack_base());
600 }
601 assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
602 // The deferred StackWatermarkSet::after_unwind check will be performed in
603 // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
604 return SharedRuntime::deopt_blob()->unpack_with_exception();
605 } else {
606 // The deferred StackWatermarkSet::after_unwind check will be performed in
607 // * OptoRuntime::handle_exception_C_helper for C2 code
608 // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
609 return nm->exception_begin();
610 }
611 }
612
613 // Entry code
614 if (StubRoutines::returns_to_call_stub(return_address)) {
615 // The deferred StackWatermarkSet::after_unwind check will be performed in
616 // JavaCallWrapper::~JavaCallWrapper
617 assert (StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
618 return StubRoutines::catch_exception_entry();
619 }
620 if (blob != nullptr && blob->is_upcall_stub()) {
621 return StubRoutines::upcall_stub_exception_handler();
622 }
623 // Interpreted code
624 if (Interpreter::contains(return_address)) {
625 // The deferred StackWatermarkSet::after_unwind check will be performed in
626 // InterpreterRuntime::exception_handler_for_exception
627 return Interpreter::rethrow_exception_entry();
628 }
629
630 guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
631 guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
632
633 #ifndef PRODUCT
634 { ResourceMark rm;
635 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
636 os::print_location(tty, (intptr_t)return_address);
637 tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
638 tty->print_cr("b) other problem");
639 }
640 #endif // PRODUCT
641 ShouldNotReachHere();
642 return nullptr;
643 }
644
645
646 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
647 return raw_exception_handler_for_return_address(current, return_address);
648 JRT_END
649
650
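// Returns the entry point of the safepoint stub that handles the polling page
// exception raised at 'pc': the return-poll handler, the wide-vector handler
// (for nmethods using wide vectors), or the plain safepoint handler.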
651 address SharedRuntime::get_poll_stub(address pc) {
652 address stub;
653 // Look up the code blob
654 CodeBlob *cb = CodeCache::find_blob(pc);
655
656 // Should be an nmethod
657 guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
658
659 // Look up the relocation information
660 assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
661 "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
662
663 #ifdef ASSERT
664 if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
665 tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
666 Disassembler::decode(cb);
667 fatal("Only polling locations are used for safepoint");
668 }
669 #endif
670
671 bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
672 bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
673 if (at_poll_return) {
674 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
675 "polling page return stub not created yet");
676 stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
677 } else if (has_wide_vectors) {
678 assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
679 "polling page vectors safepoint stub not created yet");
680 stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
681 } else {
682 assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
683 "polling page safepoint stub not created yet");
684 stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
685 }
686 log_debug(safepoint)("... found polling page %s exception at pc = "
687 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
688 at_poll_return ? "return" : "loop",
689 (intptr_t)pc, (intptr_t)stub);
690 return stub;
691 }
692
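// Throws the given exception after posting a JVMTI exception event (when enabled)
// and, for JVMCI, marking 'exception seen' in the method's MethodData if the
// raising bytecode is an invoke.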
693 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
694 if (JvmtiExport::can_post_on_exceptions()) {
695 vframeStream vfst(current, true);
696 methodHandle method = methodHandle(current, vfst.method());
697 address bcp = method()->bcp_from(vfst.bci());
698 JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
699 }
700
701 #if INCLUDE_JVMCI
702 if (EnableJVMCI) {
703 vframeStream vfst(current, true);
704 methodHandle method = methodHandle(current, vfst.method());
705 int bci = vfst.bci();
706 MethodData* trap_mdo = method->method_data();
707 if (trap_mdo != nullptr) {
708 // Set exception_seen if the exceptional bytecode is an invoke
709 Bytecode_invoke call = Bytecode_invoke_check(method, bci);
710 if (call.is_valid()) {
711 ResourceMark rm(current);
712
713 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
714 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
715
716 ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
717 if (pdata != nullptr && pdata->is_BitData()) {
718 BitData* bit_data = (BitData*) pdata;
719 bit_data->set_exception_seen();
720 }
721 }
722 }
723 }
724 #endif
725
726 Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
727 }
728
729 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
730 Handle h_exception = Exceptions::new_exception(current, name, message);
731 throw_and_post_jvmti_exception(current, h_exception);
732 }
733
734 #if INCLUDE_JVMTI
735 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current))
736 assert(hide == JNI_FALSE, "must be VTMS transition finish");
737 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
738 JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread);
739 JNIHandles::destroy_local(vthread);
740 JRT_END
741
742 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current))
743 assert(hide == JNI_TRUE, "must be VTMS transition start");
744 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
745 JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread);
746 JNIHandles::destroy_local(vthread);
747 JRT_END
748
749 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current))
750 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
751 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide);
752 JNIHandles::destroy_local(vthread);
753 JRT_END
754
755 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current))
756 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
757 JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
758 JNIHandles::destroy_local(vthread);
759 JRT_END
760 #endif // INCLUDE_JVMTI
761
// The interpreter code to call this tracing function is only
// called/generated when UL is enabled for the redefine, class tags
// at the right level. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
766 //
767 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
768 JavaThread* thread, Method* method))
769 if (method->is_obsolete()) {
770 // We are calling an obsolete method, but this is not necessarily
771 // an error. Our method could have been redefined just after we
772 // fetched the Method* from the constant pool.
773 ResourceMark rm;
774 log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
775 }
776 return 0;
777 JRT_END
778
// ret_pc points into the caller; we are returning the caller's exception handler
// for the given exception.
// Note that the implementation of this method assumes it's only called when an exception has actually occurred.
782 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
783 bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
784 assert(nm != nullptr, "must exist");
785 ResourceMark rm;
786
787 #if INCLUDE_JVMCI
788 if (nm->is_compiled_by_jvmci()) {
789 // lookup exception handler for this pc
790 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
791 ExceptionHandlerTable table(nm);
792 HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
793 if (t != nullptr) {
794 return nm->code_begin() + t->pco();
795 } else {
796 return Deoptimization::deoptimize_for_missing_exception_handler(nm);
797 }
798 }
799 #endif // INCLUDE_JVMCI
800
801 ScopeDesc* sd = nm->scope_desc_at(ret_pc);
802 // determine handler bci, if any
803 EXCEPTION_MARK;
804
805 int handler_bci = -1;
806 int scope_depth = 0;
807 if (!force_unwind) {
808 int bci = sd->bci();
809 bool recursive_exception = false;
810 do {
811 bool skip_scope_increment = false;
812 // exception handler lookup
813 Klass* ek = exception->klass();
814 methodHandle mh(THREAD, sd->method());
815 handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
816 if (HAS_PENDING_EXCEPTION) {
817 recursive_exception = true;
818 // We threw an exception while trying to find the exception handler.
819 // Transfer the new exception to the exception handle which will
820 // be set into thread local storage, and do another lookup for an
821 // exception handler for this exception, this time starting at the
822 // BCI of the exception handler which caused the exception to be
823 // thrown (bugs 4307310 and 4546590). Set "exception" reference
824 // argument to ensure that the correct exception is thrown (4870175).
825 recursive_exception_occurred = true;
826 exception = Handle(THREAD, PENDING_EXCEPTION);
827 CLEAR_PENDING_EXCEPTION;
828 if (handler_bci >= 0) {
829 bci = handler_bci;
830 handler_bci = -1;
831 skip_scope_increment = true;
832 }
833 }
834 else {
835 recursive_exception = false;
836 }
837 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
838 sd = sd->sender();
839 if (sd != nullptr) {
840 bci = sd->bci();
841 }
842 ++scope_depth;
843 }
844 } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
845 }
846
847 // found handling method => lookup exception handler
848 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
849
850 ExceptionHandlerTable table(nm);
851 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
852 if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
853 // Allow abbreviated catch tables. The idea is to allow a method
854 // to materialize its exceptions without committing to the exact
855 // routing of exceptions. In particular this is needed for adding
856 // a synthetic handler to unlock monitors when inlining
857 // synchronized methods since the unlock path isn't represented in
858 // the bytecodes.
859 t = table.entry_for(catch_pco, -1, 0);
860 }
861
862 #ifdef COMPILER1
863 if (t == nullptr && nm->is_compiled_by_c1()) {
864 assert(nm->unwind_handler_begin() != nullptr, "");
865 return nm->unwind_handler_begin();
866 }
867 #endif
868
869 if (t == nullptr) {
870 ttyLocker ttyl;
871 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
872 tty->print_cr(" Exception:");
873 exception->print();
874 tty->cr();
875 tty->print_cr(" Compiled exception table :");
876 table.print();
877 nm->print();
878 nm->print_code();
879 guarantee(false, "missing exception handler");
880 return nullptr;
881 }
882
883 if (handler_bci != -1) { // did we find a handler in this method?
884 sd->method()->set_exception_handler_entered(handler_bci); // profile
885 }
886 return nm->code_begin() + t->pco();
887 }
888
889 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
890 // These errors occur only at call sites
891 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
892 JRT_END
893
894 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
895 // These errors occur only at call sites
896 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
897 JRT_END
898
899 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
900 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
901 JRT_END
902
903 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
904 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
905 JRT_END
906
907 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
908 // This entry point is effectively only used for NullPointerExceptions which occur at inline
909 // cache sites (when the callee activation is not yet set up) so we are at a call site
910 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
911 JRT_END
912
913 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
914 throw_StackOverflowError_common(current, false);
915 JRT_END
916
917 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
918 throw_StackOverflowError_common(current, true);
919 JRT_END
920
921 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
922 // We avoid using the normal exception construction in this case because
923 // it performs an upcall to Java, and we're already out of stack space.
924 JavaThread* THREAD = current; // For exception macros.
925 InstanceKlass* k = vmClasses::StackOverflowError_klass();
926 oop exception_oop = k->allocate_instance(CHECK);
927 if (delayed) {
928 java_lang_Throwable::set_message(exception_oop,
929 Universe::delayed_stack_overflow_error_message());
930 }
931 Handle exception (current, exception_oop);
932 if (StackTraceInThrowable) {
933 java_lang_Throwable::fill_in_stack_trace(exception);
934 }
935 // Remove the ScopedValue bindings in case we got a
936 // StackOverflowError while we were trying to remove ScopedValue
937 // bindings.
938 current->clear_scopedValueBindings();
939 // Increment counter for hs_err file reporting
940 AtomicAccess::inc(&Exceptions::_stack_overflow_errors);
941 throw_and_post_jvmti_exception(current, exception);
942 }
943
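// Computes the continuation address for an implicit exception (null pointer,
// division by zero, or stack overflow) raised at 'pc'. Returns nullptr when the
// fault is not one the VM handles here, so the signal handler reports it as a crash.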
944 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
945 address pc,
946 ImplicitExceptionKind exception_kind)
947 {
948 address target_pc = nullptr;
949
950 if (Interpreter::contains(pc)) {
951 switch (exception_kind) {
952 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
953 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
954 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
955 default: ShouldNotReachHere();
956 }
957 } else {
958 switch (exception_kind) {
959 case STACK_OVERFLOW: {
960 // Stack overflow only occurs upon frame setup; the callee is
961 // going to be unwound. Dispatch to a shared runtime stub
962 // which will cause the StackOverflowError to be fabricated
963 // and processed.
964 // Stack overflow should never occur during deoptimization:
965 // the compiled method bangs the stack by as much as the
966 // interpreter would need in case of a deoptimization. The
967 // deoptimization blob and uncommon trap blob bang the stack
968 // in a debug VM to verify the correctness of the compiled
969 // method stack banging.
970 assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
971 Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
972 return SharedRuntime::throw_StackOverflowError_entry();
973 }
974
975 case IMPLICIT_NULL: {
976 if (VtableStubs::contains(pc)) {
977 // We haven't yet entered the callee frame. Fabricate an
978 // exception and begin dispatching it in the caller. Since
979 // the caller was at a call site, it's safe to destroy all
980 // caller-saved registers, as these entry points do.
981 VtableStub* vt_stub = VtableStubs::stub_containing(pc);
982
983 // If vt_stub is null, then return null to signal handler to report the SEGV error.
984 if (vt_stub == nullptr) return nullptr;
985
986 if (vt_stub->is_abstract_method_error(pc)) {
987 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
988 Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
989 // Instead of throwing the abstract method error here directly, we re-resolve
990 // and will throw the AbstractMethodError during resolve. As a result, we'll
991 // get a more detailed error message.
992 return SharedRuntime::get_handle_wrong_method_stub();
993 } else {
994 Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
995 // Assert that the signal comes from the expected location in stub code.
996 assert(vt_stub->is_null_pointer_exception(pc),
997 "obtained signal from unexpected location in stub code");
998 return SharedRuntime::throw_NullPointerException_at_call_entry();
999 }
1000 } else {
1001 CodeBlob* cb = CodeCache::find_blob(pc);
1002
1003 // If code blob is null, then return null to signal handler to report the SEGV error.
1004 if (cb == nullptr) return nullptr;
1005
1006 // Exception happened in CodeCache. Must be either:
1007 // 1. Inline-cache check in C2I handler blob,
1008 // 2. Inline-cache check in nmethod, or
1009 // 3. Implicit null exception in nmethod
1010
1011 if (!cb->is_nmethod()) {
1012 bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1013 if (!is_in_blob) {
1014 // Allow normal crash reporting to handle this
1015 return nullptr;
1016 }
1017 Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1018 // There is no handler here, so we will simply unwind.
1019 return SharedRuntime::throw_NullPointerException_at_call_entry();
1020 }
1021
1022 // Otherwise, it's a compiled method. Consult its exception handlers.
1023 nmethod* nm = cb->as_nmethod();
1024 if (nm->inlinecache_check_contains(pc)) {
1025 // exception happened inside inline-cache check code
1026 // => the nmethod is not yet active (i.e., the frame
1027 // is not set up yet) => use return address pushed by
1028 // caller => don't push another return address
1029 Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1030 return SharedRuntime::throw_NullPointerException_at_call_entry();
1031 }
1032
1033 if (nm->method()->is_method_handle_intrinsic()) {
1034 // exception happened inside MH dispatch code, similar to a vtable stub
1035 Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1036 return SharedRuntime::throw_NullPointerException_at_call_entry();
1037 }
1038
1039 #ifndef PRODUCT
1040 _implicit_null_throws++;
1041 #endif
1042 target_pc = nm->continuation_for_implicit_null_exception(pc);
1043 // If there's an unexpected fault, target_pc might be null,
1044 // in which case we want to fall through into the normal
1045 // error handling code.
1046 }
1047
1048 break; // fall through
1049 }
1050
1051
1052 case IMPLICIT_DIVIDE_BY_ZERO: {
1053 nmethod* nm = CodeCache::find_nmethod(pc);
1054 guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1055 #ifndef PRODUCT
1056 _implicit_div0_throws++;
1057 #endif
1058 target_pc = nm->continuation_for_implicit_div0_exception(pc);
1059 // If there's an unexpected fault, target_pc might be null,
1060 // in which case we want to fall through into the normal
1061 // error handling code.
1062 break; // fall through
1063 }
1064
1065 default: ShouldNotReachHere();
1066 }
1067
1068 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1069
1070 if (exception_kind == IMPLICIT_NULL) {
1071 #ifndef PRODUCT
1072 // for AbortVMOnException flag
1073 Exceptions::debug_check_abort("java.lang.NullPointerException");
1074 #endif //PRODUCT
1075 Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1076 } else {
1077 #ifndef PRODUCT
1078 // for AbortVMOnException flag
1079 Exceptions::debug_check_abort("java.lang.ArithmeticException");
1080 #endif //PRODUCT
1081 Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1082 }
1083 return target_pc;
1084 }
1085
1086 ShouldNotReachHere();
1087 return nullptr;
1088 }
1089
1090
1091 /**
 * Throws a java/lang/UnsatisfiedLinkError. The address of this method is
1093 * installed in the native function entry of all native Java methods before
1094 * they get linked to their actual native methods.
1095 *
1096 * \note
 * This method actually never gets called! The reason is that
1098 * the interpreter's native entries call NativeLookup::lookup() which
1099 * throws the exception when the lookup fails. The exception is then
1100 * caught and forwarded on the return from NativeLookup::lookup() call
1101 * before the call to the native function. This might change in the future.
1102 */
1103 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1104 {
1105 // We return a bad value here to make sure that the exception is
1106 // forwarded before we look at the return value.
1107 THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1108 }
1109 JNI_END
1110
1111 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1112 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1113 }
1114
1115 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1116 #if INCLUDE_JVMCI
1117 if (!obj->klass()->has_finalizer()) {
1118 return;
1119 }
1120 #endif // INCLUDE_JVMCI
1121 assert(oopDesc::is_oop(obj), "must be a valid oop");
1122 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1123 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1124 JRT_END
1125
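// Returns the Java-level thread id of the given thread's java.lang.Thread object,
// or 0 if the thread object has not been created yet.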
1126 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1127 assert(thread != nullptr, "No thread");
1128 if (thread == nullptr) {
1129 return 0;
1130 }
1131 guarantee(Thread::current() != thread || thread->is_oop_safe(),
1132 "current cannot touch oops after its GC barrier is detached.");
1133 oop obj = thread->threadObj();
1134 return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1135 }
1136
1137 /**
1138 * This function ought to be a void function, but cannot be because
1139 * it gets turned into a tail-call on sparc, which runs into dtrace bug
1140 * 6254741. Once that is fixed we can remove the dummy return value.
1141 */
1142 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1143 return dtrace_object_alloc(JavaThread::current(), o, o->size());
1144 }
1145
1146 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1147 return dtrace_object_alloc(thread, o, o->size());
1148 }
1149
1150 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1151 assert(DTraceAllocProbes, "wrong call");
1152 Klass* klass = o->klass();
1153 Symbol* name = klass->name();
1154 HOTSPOT_OBJECT_ALLOC(
1155 get_java_tid(thread),
1156 (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1157 return 0;
1158 }
1159
1160 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1161 JavaThread* current, Method* method))
1162 assert(current == JavaThread::current(), "pre-condition");
1163
1164 assert(DTraceMethodProbes, "wrong call");
1165 Symbol* kname = method->klass_name();
1166 Symbol* name = method->name();
1167 Symbol* sig = method->signature();
1168 HOTSPOT_METHOD_ENTRY(
1169 get_java_tid(current),
1170 (char *) kname->bytes(), kname->utf8_length(),
1171 (char *) name->bytes(), name->utf8_length(),
1172 (char *) sig->bytes(), sig->utf8_length());
1173 return 0;
1174 JRT_END
1175
1176 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1177 JavaThread* current, Method* method))
1178 assert(current == JavaThread::current(), "pre-condition");
1179 assert(DTraceMethodProbes, "wrong call");
1180 Symbol* kname = method->klass_name();
1181 Symbol* name = method->name();
1182 Symbol* sig = method->signature();
1183 HOTSPOT_METHOD_RETURN(
1184 get_java_tid(current),
1185 (char *) kname->bytes(), kname->utf8_length(),
1186 (char *) name->bytes(), name->utf8_length(),
1187 (char *) sig->bytes(), sig->utf8_length());
1188 return 0;
1189 JRT_END
1190
1191
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the stack
// but the callee has not been invoked yet. Used by: resolve virtual/static,
// vtable updates, etc. Caller frame must be compiled.
1196 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1197 JavaThread* current = THREAD;
1198 ResourceMark rm(current);
1199
1200 // last java frame on stack (which includes native call frames)
  vframeStream vfst(current, true);  // Do not skip any javaCalls
1202
1203 return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1204 }
1205
1206 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1207 nmethod* caller = vfst.nm();
1208
1209 address pc = vfst.frame_pc();
1210 { // Get call instruction under lock because another thread may be busy patching it.
1211 CompiledICLocker ic_locker(caller);
1212 return caller->attached_method_before_pc(pc);
1213 }
1214 return nullptr;
1215 }
1216
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the stack
// but the callee has not been invoked yet. Caller frame must be compiled.
1220 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1221 CallInfo& callinfo, TRAPS) {
1222 Handle receiver;
1223 Handle nullHandle; // create a handy null handle for exception returns
1224 JavaThread* current = THREAD;
1225
1226 assert(!vfst.at_end(), "Java frame must exist");
1227
1228 // Find caller and bci from vframe
1229 methodHandle caller(current, vfst.method());
1230 int bci = vfst.bci();
1231
1232 if (caller->is_continuation_enter_intrinsic()) {
1233 bc = Bytecodes::_invokestatic;
1234 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1235 return receiver;
1236 }
1237
  // The substitutability test implementation piggybacks on static call resolution
1239 Bytecodes::Code code = caller->java_code_at(bci);
1240 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1241 bc = Bytecodes::_invokestatic;
1242 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1243 assert(attached_method.not_null(), "must have attached method");
1244 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1245 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1246 #ifdef ASSERT
1247 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1248 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1249 #endif
1250 return receiver;
1251 }
1252
1253 Bytecode_invoke bytecode(caller, bci);
1254 int bytecode_index = bytecode.index();
1255 bc = bytecode.invoke_code();
1256
1257 methodHandle attached_method(current, extract_attached_method(vfst));
1258 if (attached_method.not_null()) {
1259 Method* callee = bytecode.static_target(CHECK_NH);
1260 vmIntrinsics::ID id = callee->intrinsic_id();
1261 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1262 // it attaches statically resolved method to the call site.
1263 if (MethodHandles::is_signature_polymorphic(id) &&
1264 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1265 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1266
1267 // Adjust invocation mode according to the attached method.
1268 switch (bc) {
1269 case Bytecodes::_invokevirtual:
1270 if (attached_method->method_holder()->is_interface()) {
1271 bc = Bytecodes::_invokeinterface;
1272 }
1273 break;
1274 case Bytecodes::_invokeinterface:
1275 if (!attached_method->method_holder()->is_interface()) {
1276 bc = Bytecodes::_invokevirtual;
1277 }
1278 break;
1279 case Bytecodes::_invokehandle:
1280 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1281 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1282 : Bytecodes::_invokevirtual;
1283 }
1284 break;
1285 default:
1286 break;
1287 }
1288 } else {
1289 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1290 if (!attached_method->method_holder()->is_inline_klass()) {
1291 // Ignore the attached method in this case to not confuse below code
1292 attached_method = methodHandle(current, nullptr);
1293 }
1294 }
1295 }
1296
1297 assert(bc != Bytecodes::_illegal, "not initialized");
1298
1299 bool has_receiver = bc != Bytecodes::_invokestatic &&
1300 bc != Bytecodes::_invokedynamic &&
1301 bc != Bytecodes::_invokehandle;
1302 bool check_null_and_abstract = true;
1303
1304 // Find receiver for non-static call
1305 if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
1307 // compiled frames. The receiver might be in a register.
1308 RegisterMap reg_map2(current,
1309 RegisterMap::UpdateMap::include,
1310 RegisterMap::ProcessFrames::include,
1311 RegisterMap::WalkContinuation::skip);
1312 frame stubFrame = current->last_frame();
1313 // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);
1315
1316 Method* callee = attached_method();
1317 if (callee == nullptr) {
1318 callee = bytecode.static_target(CHECK_NH);
1319 if (callee == nullptr) {
1320 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1321 }
1322 }
1323 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1324 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1325 // If the receiver is an inline type that is passed as fields, no oop is available
1326 // Resolve the call without receiver null checking.
1327 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1328 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1329 if (bc == Bytecodes::_invokeinterface) {
1330 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1331 }
1332 check_null_and_abstract = false;
1333 } else {
1334 // Retrieve from a compiled argument list
      receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1336 assert(oopDesc::is_oop_or_null(receiver()), "");
1337 if (receiver.is_null()) {
1338 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1339 }
1340 }
1341 }
1342
1343 // Resolve method
1344 if (attached_method.not_null()) {
1345 // Parameterized by attached method.
1346 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1347 } else {
1348 // Parameterized by bytecode.
1349 constantPoolHandle constants(current, caller->constants());
1350 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1351 }
1352
1353 #ifdef ASSERT
1354 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1355 if (has_receiver && check_null_and_abstract) {
1356 assert(receiver.not_null(), "should have thrown exception");
1357 Klass* receiver_klass = receiver->klass();
1358 Klass* rk = nullptr;
1359 if (attached_method.not_null()) {
1360 // In case there's resolved method attached, use its holder during the check.
1361 rk = attached_method->method_holder();
1362 } else {
1363 // Klass is already loaded.
1364 constantPoolHandle constants(current, caller->constants());
1365 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1366 }
1367 Klass* static_receiver_klass = rk;
1368 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1369 "actual receiver must be subclass of static receiver klass");
1370 if (receiver_klass->is_instance_klass()) {
1371 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1372 tty->print_cr("ERROR: Klass not yet initialized!!");
1373 receiver_klass->print();
1374 }
1375 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1376 }
1377 }
1378 #endif
1379
1380 return receiver;
1381 }
1382
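// Determines the callee of the call that brought us into the runtime: taken from
// the JavaCallWrapper when no Java frames are on the stack yet, otherwise
// re-resolved from the caller's bytecode via find_callee_info_helper().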
1383 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1384 JavaThread* current = THREAD;
1385 ResourceMark rm(current);
1386 // We need first to check if any Java activations (compiled, interpreted)
1387 // exist on the stack since last JavaCall. If not, we need
1388 // to get the target method from the JavaCall wrapper.
1389 vframeStream vfst(current, true); // Do not skip any javaCalls
1390 methodHandle callee_method;
1391 if (vfst.at_end()) {
1392 // No Java frames were found on stack since we did the JavaCall.
1393 // Hence the stack can only contain an entry_frame. We need to
1394 // find the target method from the stub frame.
1395 RegisterMap reg_map(current,
1396 RegisterMap::UpdateMap::skip,
1397 RegisterMap::ProcessFrames::include,
1398 RegisterMap::WalkContinuation::skip);
1399 frame fr = current->last_frame();
1400 assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
1402 assert(fr.is_entry_frame(), "must be");
1403 // fr is now pointing to the entry frame.
1404 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1405 } else {
1406 Bytecodes::Code bc;
1407 CallInfo callinfo;
1408 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1409 // Calls via mismatching methods are always non-scalarized
1410 if (callinfo.resolved_method()->mismatch()) {
1411 caller_does_not_scalarize = true;
1412 }
1413 callee_method = methodHandle(current, callinfo.selected_method());
1414 }
1415 assert(callee_method()->is_method(), "must be");
1416 return callee_method;
1417 }
1418
1419 // Resolves a call.
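// Resolves the callee from the compiled caller's bytecode and then, under the
// CompiledICLocker, patches either the inline cache (virtual, non-optimized) or
// the direct call site (static / optimized virtual) to point at it.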
1420 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1421 JavaThread* current = THREAD;
1422 ResourceMark rm(current);
1423 RegisterMap cbl_map(current,
1424 RegisterMap::UpdateMap::skip,
1425 RegisterMap::ProcessFrames::include,
1426 RegisterMap::WalkContinuation::skip);
1427 frame caller_frame = current->last_frame().sender(&cbl_map);
1428
1429 CodeBlob* caller_cb = caller_frame.cb();
1430 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1431 nmethod* caller_nm = caller_cb->as_nmethod();
1432
1433 // determine call info & receiver
1434 // note: a) receiver is null for static calls
1435 // b) an exception is thrown if receiver is null for non-static calls
1436 CallInfo call_info;
1437 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1438 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1439
1440 NoSafepointVerifier nsv;
1441
1442 methodHandle callee_method(current, call_info.selected_method());
1443 // Calls via mismatching methods are always non-scalarized
1444 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1445 caller_does_not_scalarize = true;
1446 }
1447
1448 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1449 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1450 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1451 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1452 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1453
1454 assert(!caller_nm->is_unloading(), "It should not be unloading");
1455
1456 #ifndef PRODUCT
1457 // tracing/debugging/statistics
1458 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1459 (is_virtual) ? (&_resolve_virtual_ctr) :
1460 (&_resolve_static_ctr);
1461 AtomicAccess::inc(addr);
1462
1463 if (TraceCallFixup) {
1464 ResourceMark rm(current);
1465 tty->print("resolving %s%s (%s) %s call to",
1466 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1467 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1468 callee_method->print_short_name(tty);
1469 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1470 p2i(caller_frame.pc()), p2i(callee_method->code()));
1471 }
1472 #endif
1473
1474 if (invoke_code == Bytecodes::_invokestatic) {
1475 assert(callee_method->method_holder()->is_initialized() ||
1476 callee_method->method_holder()->is_reentrant_initialization(current),
1477 "invalid class initialization state for invoke_static");
1478 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1479       // In order to keep the class initialization check, do not patch the call
1480       // site for a static call when the class is not fully initialized.
1481       // The proper check is enforced by call site re-resolution on every invocation.
1482 //
1483 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1484 // explicit class initialization check is put in nmethod entry (VEP).
1485 assert(callee_method->method_holder()->is_linked(), "must be");
1486 return callee_method;
1487 }
1488 }
1489
1490
1491 // JSR 292 key invariant:
1492 // If the resolved method is a MethodHandle invoke target, the call
1493   // site must be a MethodHandle call site, because the lambda form might tail-call,
1494   // leaving the stack in a state unknown to either caller or callee.
1495
1496 // Compute entry points. The computation of the entry points is independent of
1497 // patching the call.
1498
1499 // Make sure the callee nmethod does not get deoptimized and removed before
1500 // we are done patching the code.
1501
1502
1503 CompiledICLocker ml(caller_nm);
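  // Non-optimized virtual calls dispatch through an inline cache; all other
  // call kinds (static, special, optimized virtual) bind the call site directly.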
1504 if (is_virtual && !is_optimized) {
1505 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1506 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1507 } else {
1508 // Callsite is a direct call - set it to the destination method
1509 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1510 callsite->set(callee_method, caller_does_not_scalarize);
1511 }
1512
1513 return callee_method;
1514 }
1515
1516 // Inline caches exist only in compiled code
1517 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1518 #ifdef ASSERT
1519 RegisterMap reg_map(current,
1520 RegisterMap::UpdateMap::skip,
1521 RegisterMap::ProcessFrames::include,
1522 RegisterMap::WalkContinuation::skip);
1523 frame stub_frame = current->last_frame();
1524 assert(stub_frame.is_runtime_frame(), "sanity check");
1525   frame caller_frame = stub_frame.sender(&reg_map);
1526 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1527 #endif /* ASSERT */
1528
1529 methodHandle callee_method;
1530 const bool is_optimized = false;
1531 bool caller_does_not_scalarize = false;
1532 JRT_BLOCK
1533 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1534 // Return Method* through TLS
1535 current->set_vm_result_metadata(callee_method());
1536 JRT_BLOCK_END
1537 // return compiled code entry point after potential safepoints
1538 return get_resolved_entry(current, callee_method, false, is_optimized, caller_does_not_scalarize);
1539 JRT_END
1540
1541
1542 // Handle call site that has been made non-entrant
1543 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1544   // 6243940 We might end up in here if the callee is deoptimized
1545   // as we race to call it. We don't want to take a safepoint if
1546   // the caller was interpreted because the caller frame will look
1547   // interpreted to the stack walkers while the arguments are already
1548   // "compiled", so it is much better to make this transition
1549   // invisible to the stack walking code. The i2c path will
1550   // place the callee method in callee_target. It is stashed
1551   // there because if we tried to find the callee by normal means, a
1552   // safepoint would be possible and we would have trouble GC'ing the compiled args.
1553 RegisterMap reg_map(current,
1554 RegisterMap::UpdateMap::skip,
1555 RegisterMap::ProcessFrames::include,
1556 RegisterMap::WalkContinuation::skip);
1557 frame stub_frame = current->last_frame();
1558 assert(stub_frame.is_runtime_frame(), "sanity check");
1559   frame caller_frame = stub_frame.sender(&reg_map);
1560
1561 if (caller_frame.is_interpreted_frame() ||
1562 caller_frame.is_entry_frame() ||
1563 caller_frame.is_upcall_stub_frame()) {
1564 Method* callee = current->callee_target();
1565 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1566 current->set_vm_result_metadata(callee);
1567 current->set_callee_target(nullptr);
1568 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1569 // Bypass class initialization checks in c2i when caller is in native.
1570 // JNI calls to static methods don't have class initialization checks.
1571 // Fast class initialization checks are present in c2i adapters and call into
1572 // SharedRuntime::handle_wrong_method() on the slow path.
1573 //
1574 // JVM upcalls may land here as well, but there's a proper check present in
1575 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1576 // so bypassing it in c2i adapter is benign.
1577 return callee->get_c2i_no_clinit_check_entry();
1578 } else {
1579 if (caller_frame.is_interpreted_frame()) {
1580 return callee->get_c2i_inline_entry();
1581 } else {
1582 return callee->get_c2i_entry();
1583 }
1584 }
1585 }
1586
1587   // Must be a compiled-to-compiled path, which is safe to stack walk
1588 methodHandle callee_method;
1589 bool is_static_call = false;
1590 bool is_optimized = false;
1591 bool caller_does_not_scalarize = false;
1592 JRT_BLOCK
1593 // Force resolving of caller (if we called from compiled frame)
1594 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_does_not_scalarize, CHECK_NULL);
1595 current->set_vm_result_metadata(callee_method());
1596 JRT_BLOCK_END
1597 // return compiled code entry point after potential safepoints
1598 return get_resolved_entry(current, callee_method, is_static_call, is_optimized, caller_does_not_scalarize);
1599 JRT_END
1600
1601 // Handle abstract method call
1602 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1603 // Verbose error message for AbstractMethodError.
1604 // Get the called method from the invoke bytecode.
1605 vframeStream vfst(current, true);
1606 assert(!vfst.at_end(), "Java frame must exist");
1607 methodHandle caller(current, vfst.method());
1608 Bytecode_invoke invoke(caller, vfst.bci());
1609 DEBUG_ONLY( invoke.verify(); )
1610
1611 // Find the compiled caller frame.
1612 RegisterMap reg_map(current,
1613 RegisterMap::UpdateMap::include,
1614 RegisterMap::ProcessFrames::include,
1615 RegisterMap::WalkContinuation::skip);
1616 frame stubFrame = current->last_frame();
1617 assert(stubFrame.is_runtime_frame(), "must be");
1618   frame callerFrame = stubFrame.sender(&reg_map);
1619 assert(callerFrame.is_compiled_frame(), "must be");
1620
1621 // Install exception and return forward entry.
1622 address res = SharedRuntime::throw_AbstractMethodError_entry();
1623 JRT_BLOCK
1624 methodHandle callee(current, invoke.static_target(current));
1625 if (!callee.is_null()) {
1626     oop recv = callerFrame.retrieve_receiver(&reg_map);
1627 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1628 res = StubRoutines::forward_exception_entry();
1629 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1630 }
1631 JRT_BLOCK_END
1632 return res;
1633 JRT_END
1634
1635 // return verified_code_entry if interp_only_mode is not set for the current thread;
1636 // otherwise return c2i entry.
1637 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1638 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1639 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1640 // In interp_only_mode we need to go to the interpreted entry
1641 // The c2i won't patch in this mode -- see fixup_callers_callsite
1642 return callee_method->get_c2i_entry();
1643 }
1644
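  // Entry selection (see the entry-point discussion elsewhere in this file):
  // callers that do not scalarize inline-type arguments jump to the inline entry
  // (arguments passed as oops); static and optimized virtual calls bind to the
  // fully scalarized verified entry; other virtual calls use the "RO" entry,
  // where the receiver is passed as an oop.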
1645 if (caller_does_not_scalarize) {
1646 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1647 return callee_method->verified_inline_code_entry();
1648 } else if (is_static_call || is_optimized) {
1649 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1650 return callee_method->verified_code_entry();
1651 } else {
1652 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1653 return callee_method->verified_inline_ro_code_entry();
1654 }
1655 }
1656
1657 // resolve a static call and patch code
1658 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1659 methodHandle callee_method;
1660 bool caller_does_not_scalarize = false;
1661 bool enter_special = false;
1662 JRT_BLOCK
1663 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1664 current->set_vm_result_metadata(callee_method());
1665 JRT_BLOCK_END
1666 // return compiled code entry point after potential safepoints
1667 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1668 JRT_END
1669
1670 // resolve virtual call and update inline cache to monomorphic
1671 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1672 methodHandle callee_method;
1673 bool caller_does_not_scalarize = false;
1674 JRT_BLOCK
1675 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1676 current->set_vm_result_metadata(callee_method());
1677 JRT_BLOCK_END
1678 // return compiled code entry point after potential safepoints
1679 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1680 JRT_END
1681
1682
1683 // Resolve a virtual call that can be statically bound (e.g., always
1684 // monomorphic, so it has no inline cache). Patch code to resolved target.
1685 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1686 methodHandle callee_method;
1687 bool caller_does_not_scalarize = false;
1688 JRT_BLOCK
1689 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1690 current->set_vm_result_metadata(callee_method());
1691 JRT_BLOCK_END
1692 // return compiled code entry point after potential safepoints
1693 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1694 JRT_END
1695
1696
1697
1698 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1699 JavaThread* current = THREAD;
1700 ResourceMark rm(current);
1701 CallInfo call_info;
1702 Bytecodes::Code bc;
1703
1704 // receiver is null for static calls. An exception is thrown for null
1705 // receivers for non-static calls
1706 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1707
1708 methodHandle callee_method(current, call_info.selected_method());
1709
1710 #ifndef PRODUCT
1711 AtomicAccess::inc(&_ic_miss_ctr);
1712
1713 // Statistics & Tracing
1714 if (TraceCallFixup) {
1715 ResourceMark rm(current);
1716 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1717 callee_method->print_short_name(tty);
1718 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1719 }
1720
1721 if (ICMissHistogram) {
1722 MutexLocker m(VMStatistic_lock);
1723 RegisterMap reg_map(current,
1724 RegisterMap::UpdateMap::skip,
1725 RegisterMap::ProcessFrames::include,
1726 RegisterMap::WalkContinuation::skip);
1727     frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1728 // produce statistics under the lock
1729 trace_ic_miss(f.pc());
1730 }
1731 #endif
1732
1733 // install an event collector so that when a vtable stub is created the
1734 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1735 // event can't be posted when the stub is created as locks are held
1736 // - instead the event will be deferred until the event collector goes
1737 // out of scope.
1738 JvmtiDynamicCodeEventCollector event_collector;
1739
1740 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1741 RegisterMap reg_map(current,
1742 RegisterMap::UpdateMap::skip,
1743 RegisterMap::ProcessFrames::include,
1744 RegisterMap::WalkContinuation::skip);
1745   frame caller_frame = current->last_frame().sender(&reg_map);
1746 CodeBlob* cb = caller_frame.cb();
1747 nmethod* caller_nm = cb->as_nmethod();
1748 // Calls via mismatching methods are always non-scalarized
1749 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1750 caller_does_not_scalarize = true;
1751 }
1752
1753 CompiledICLocker ml(caller_nm);
1754 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1755 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1756
1757 return callee_method;
1758 }
1759
1760 //
1761 // Resets a call site in compiled code so it will get resolved again.
1762 // This routine handles virtual call sites, optimized virtual call
1763 // sites, and static call sites. It is typically used to change a call site's
1764 // destination from compiled to interpreted.
1765 //
1766 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1767 JavaThread* current = THREAD;
1768 ResourceMark rm(current);
1769 RegisterMap reg_map(current,
1770 RegisterMap::UpdateMap::skip,
1771 RegisterMap::ProcessFrames::include,
1772 RegisterMap::WalkContinuation::skip);
1773 frame stub_frame = current->last_frame();
1774 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1775   frame caller = stub_frame.sender(&reg_map);
1776 if (caller.is_compiled_frame()) {
1777 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1778 }
1779
1780 // Do nothing if the frame isn't a live compiled frame.
1781 // nmethod could be deoptimized by the time we get here
1782 // so no update to the caller is needed.
1783
1784 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1785 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1786
1787 address pc = caller.pc();
1788
1789 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1790 assert(caller_nm != nullptr, "did not find caller nmethod");
1791
1792 // Default call_addr is the location of the "basic" call.
1793     // Determine the address of the call we are re-resolving. With
1794 // Inline Caches we will always find a recognizable call.
1795 // With Inline Caches disabled we may or may not find a
1796 // recognizable call. We will always find a call for static
1797 // calls and for optimized virtual calls. For vanilla virtual
1798 // calls it depends on the state of the UseInlineCaches switch.
1799 //
1800 // With Inline Caches disabled we can get here for a virtual call
1801 // for two reasons:
1802     // 1 - calling an abstract method. The vtable for abstract methods
1803     //     will run us through handle_wrong_method and we will eventually
1804     //     end up in the interpreter to throw the AbstractMethodError.
1805     // 2 - a racing deoptimization. We could be doing a vanilla vtable
1806     //     call and between the time we fetch the entry address and
1807     //     the time we jump to it, the target gets deoptimized. Similar to 1,
1808     //     we will wind up in the interpreter (through a c2i with c2).
1809 //
1810 CompiledICLocker ml(caller_nm);
1811 address call_addr = caller_nm->call_instruction_address(pc);
1812
1813 if (call_addr != nullptr) {
1814 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1815 // bytes back in the instruction stream so we must also check for reloc info.
1816 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1817 bool ret = iter.next(); // Get item
1818 if (ret) {
1819 is_static_call = false;
1820 is_optimized = false;
1821 switch (iter.type()) {
1822 case relocInfo::static_call_type:
1823 is_static_call = true;
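          // Intentional fall through: static calls share the direct-call cleanup below.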
1824 case relocInfo::opt_virtual_call_type: {
1825 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1826 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1827 cdc->set_to_clean();
1828 break;
1829 }
1830 case relocInfo::virtual_call_type: {
1831 // compiled, dispatched call (which used to call an interpreted method)
1832 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1833 inline_cache->set_to_clean();
1834 break;
1835 }
1836 default:
1837 break;
1838 }
1839 }
1840 }
1841 }
1842
1843 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1844
1845 #ifndef PRODUCT
1846 AtomicAccess::inc(&_wrong_method_ctr);
1847
1848 if (TraceCallFixup) {
1849 ResourceMark rm(current);
1850 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1851 callee_method->print_short_name(tty);
1852 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1853 }
1854 #endif
1855
1856 return callee_method;
1857 }
1858
1859 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1860 // The faulting unsafe accesses should be changed to throw the error
1861 // synchronously instead. Meanwhile the faulting instruction will be
1862 // skipped over (effectively turning it into a no-op) and an
1863 // asynchronous exception will be raised which the thread will
1864 // handle at a later point. If the instruction is a load it will
1865 // return garbage.
1866
1867 // Request an async exception.
1868 thread->set_pending_unsafe_access_error();
1869
1870 // Return address of next instruction to execute.
1871 return next_pc;
1872 }
1873
1874 #ifdef ASSERT
1875 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1876 const BasicType* sig_bt,
1877 const VMRegPair* regs) {
1878 ResourceMark rm;
1879 const int total_args_passed = method->size_of_parameters();
1880 const VMRegPair* regs_with_member_name = regs;
1881 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1882
1883 const int member_arg_pos = total_args_passed - 1;
1884 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1885 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1886
1887 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1888
1889 for (int i = 0; i < member_arg_pos; i++) {
1890 VMReg a = regs_with_member_name[i].first();
1891 VMReg b = regs_without_member_name[i].first();
1892 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1893 }
1894 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1895 }
1896 #endif
1897
1898 // ---------------------------------------------------------------------------
1899 // We are calling the interpreter via a c2i. Normally this would mean that
1900 // we were called by a compiled method. However, we could have lost a race
1901 // where we went int -> i2c -> c2i and so the caller could in fact be
1902 // interpreted. If the caller is compiled, we attempt to patch the caller
1903 // so it no longer calls into the interpreter.
1904 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1905 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1906
1907 // It's possible that deoptimization can occur at a call site which hasn't
1908 // been resolved yet, in which case this function will be called from
1909 // an nmethod that has been patched for deopt and we can ignore the
1910 // request for a fixup.
1911   // It is also possible that we lost a race and from_compiled_entry
1912   // is now back to the i2c; in that case we don't need to patch, and if
1913   // we did we'd leap into space because the call site needs to use the
1914   // "to interpreter" stub in order to load up the Method*. Don't
1915   // ask me how I know this...
1916
1917 // Result from nmethod::is_unloading is not stable across safepoints.
1918 NoSafepointVerifier nsv;
1919
1920 nmethod* callee = method->code();
1921 if (callee == nullptr) {
1922 return;
1923 }
1924
1925 // write lock needed because we might patch call site by set_to_clean()
1926 // and is_unloading() can modify nmethod's state
1927 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1928
1929 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1930 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1931 return;
1932 }
1933
1934 // The check above makes sure this is an nmethod.
1935 nmethod* caller = cb->as_nmethod();
1936
1937 // Get the return PC for the passed caller PC.
1938 address return_pc = caller_pc + frame::pc_return_offset;
1939
1940 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1941 return;
1942 }
1943
1944   // Expect to find a native call there (unless it was a non-inline-cache vtable dispatch)
1945 CompiledICLocker ic_locker(caller);
1946 ResourceMark rm;
1947
1948 // If we got here through a static call or opt_virtual call, then we know where the
1949 // call address would be; let's peek at it
1950 address callsite_addr = (address)nativeCall_before(return_pc);
1951 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1952 if (!iter.next()) {
1953 // No reloc entry found; not a static or optimized virtual call
1954 return;
1955 }
1956
1957 relocInfo::relocType type = iter.reloc()->type();
1958 if (type != relocInfo::static_call_type &&
1959 type != relocInfo::opt_virtual_call_type) {
1960 return;
1961 }
1962
1963 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1964 callsite->set_to_clean();
1965 JRT_END
1966
1967
1968 // same as JVM_Arraycopy, but called directly from compiled code
1969 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1970 oopDesc* dest, jint dest_pos,
1971 jint length,
1972 JavaThread* current)) {
1973 #ifndef PRODUCT
1974 _slow_array_copy_ctr++;
1975 #endif
1976 // Check if we have null pointers
1977 if (src == nullptr || dest == nullptr) {
1978 THROW(vmSymbols::java_lang_NullPointerException());
1979 }
1980   // Do the copy. The casts to arrayOop are required by the copy_array API,
1981 // even though the copy_array API also performs dynamic checks to ensure
1982 // that src and dest are truly arrays (and are conformable).
1983 // The copy_array mechanism is awkward and could be removed, but
1984 // the compilers don't call this function except as a last resort,
1985 // so it probably doesn't matter.
1986 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
1987 (arrayOopDesc*)dest, dest_pos,
1988 length, current);
1989 }
1990 JRT_END
1991
1992 // The caller of generate_class_cast_message() (or one of its callers)
1993 // must use a ResourceMark in order to correctly free the result.
1994 char* SharedRuntime::generate_class_cast_message(
1995 JavaThread* thread, Klass* caster_klass) {
1996
1997 // Get target class name from the checkcast instruction
1998 vframeStream vfst(thread, true);
1999 assert(!vfst.at_end(), "Java frame must exist");
2000 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
2001 constantPoolHandle cpool(thread, vfst.method()->constants());
2002 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
2003 Symbol* target_klass_name = nullptr;
2004 if (target_klass == nullptr) {
2005 // This klass should be resolved, but just in case, get the name in the klass slot.
2006 target_klass_name = cpool->klass_name_at(cc.index());
2007 }
2008 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
2009 }
2010
2011
2012 // The caller of generate_class_cast_message() (or one of its callers)
2013 // must use a ResourceMark in order to correctly free the result.
2014 char* SharedRuntime::generate_class_cast_message(
2015 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
2016 const char* caster_name = caster_klass->external_name();
2017
2018 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
2019 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
2020 target_klass->external_name();
2021
2022 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
2023
2024 const char* caster_klass_description = "";
2025 const char* target_klass_description = "";
2026 const char* klass_separator = "";
2027 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
2028 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
2029 } else {
2030 caster_klass_description = caster_klass->class_in_module_of_loader();
2031 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
2032 klass_separator = (target_klass != nullptr) ? "; " : "";
2033 }
2034
2035   // add 3 for the parentheses and preceding space
2036 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2037
2038 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2039 if (message == nullptr) {
2040 // Shouldn't happen, but don't cause even more problems if it does
2041 message = const_cast<char*>(caster_klass->external_name());
2042 } else {
2043 jio_snprintf(message,
2044 msglen,
2045 "class %s cannot be cast to class %s (%s%s%s)",
2046 caster_name,
2047 target_name,
2048 caster_klass_description,
2049 klass_separator,
2050 target_klass_description
2051 );
2052 }
2053 return message;
2054 }
2055
2056 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2057 assert(klass->is_inline_klass(), "Must be a concrete value class");
2058 const char* desc = "Cannot synchronize on an instance of value class ";
2059 const char* className = klass->external_name();
2060 size_t msglen = strlen(desc) + strlen(className) + 1;
2061 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2062 if (nullptr == message) {
2063 // Out of memory: can't create detailed error message
2064 message = const_cast<char*>(klass->external_name());
2065 } else {
2066 jio_snprintf(message, msglen, "%s%s", desc, className);
2067 }
2068 return message;
2069 }
2070
2071 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2072 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2073 JRT_END
2074
2075 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2076 if (!SafepointSynchronize::is_synchronizing()) {
2077 // Only try quick_enter() if we're not trying to reach a safepoint
2078 // so that the calling thread reaches the safepoint more quickly.
2079 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2080 return;
2081 }
2082 }
2083 // NO_ASYNC required because an async exception on the state transition destructor
2084 // would leave you with the lock held and it would never be released.
2085 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2086 // and the model is that an exception implies the method failed.
2087 JRT_BLOCK_NO_ASYNC
2088 Handle h_obj(THREAD, obj);
2089 ObjectSynchronizer::enter(h_obj, lock, current);
2090 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2091 JRT_BLOCK_END
2092 }
2093
2094 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2095 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2096 SharedRuntime::monitor_enter_helper(obj, lock, current);
2097 JRT_END
2098
2099 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2100 assert(JavaThread::current() == current, "invariant");
2101 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2102 ExceptionMark em(current);
2103
2104 // Check if C2_MacroAssembler::fast_unlock() or
2105 // C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated
2106 // monitor before going slow path. Since there is no safepoint
2107 // polling when calling into the VM, we can be sure that the monitor
2108 // hasn't been deallocated.
2109 ObjectMonitor* m = current->unlocked_inflated_monitor();
2110 if (m != nullptr) {
2111 assert(!m->has_owner(current), "must be");
2112 current->clear_unlocked_inflated_monitor();
2113
2114 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2115 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2116 // Some other thread acquired the lock (or the monitor was
2117 // deflated). Either way we are done.
2118 return;
2119 }
2120 }
2121
2122 // The object could become unlocked through a JNI call, which we have no other checks for.
2123 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2124 if (obj->is_unlocked()) {
2125 if (CheckJNICalls) {
2126 fatal("Object has been unlocked by JNI");
2127 }
2128 return;
2129 }
2130 ObjectSynchronizer::exit(obj, lock, current);
2131 }
2132
2133 // Handles the uncommon cases of monitor unlocking in compiled code
2134 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2135 assert(current == JavaThread::current(), "pre-condition");
2136 SharedRuntime::monitor_exit_helper(obj, lock, current);
2137 JRT_END
2138
2139 #ifndef PRODUCT
2140
2141 void SharedRuntime::print_statistics() {
2142 ttyLocker ttyl;
2143 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2144
2145 SharedRuntime::print_ic_miss_histogram();
2146
2147 // Dump the JRT_ENTRY counters
2148 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2149 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2150 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2151 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2152 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2153 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2154
2155 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2156 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2157 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2158 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2159 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2160
2161 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2162 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2163 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2164 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2165 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2166 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2167 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2168 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2169 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2170 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2171 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2172 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2173 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2174 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2175 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2176 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2177   if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memory ops", _unsafe_set_memory_ctr);
2178
2179 AdapterHandlerLibrary::print_statistics();
2180
2181 if (xtty != nullptr) xtty->tail("statistics");
2182 }
2183
2184 inline double percent(int64_t x, int64_t y) {
2185 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2186 }
2187
2188 class MethodArityHistogram {
2189 public:
2190 enum { MAX_ARITY = 256 };
2191 private:
2192 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2193 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2194 static uint64_t _total_compiled_calls;
2195 static uint64_t _max_compiled_calls_per_method;
2196 static int _max_arity; // max. arity seen
2197 static int _max_size; // max. arg size seen
2198
2199 static void add_method_to_histogram(nmethod* nm) {
2200 Method* method = (nm == nullptr) ? nullptr : nm->method();
2201 if (method != nullptr) {
2202 ArgumentCount args(method->signature());
2203 int arity = args.size() + (method->is_static() ? 0 : 1);
2204 int argsize = method->size_of_parameters();
2205 arity = MIN2(arity, MAX_ARITY-1);
2206 argsize = MIN2(argsize, MAX_ARITY-1);
2207 uint64_t count = (uint64_t)method->compiled_invocation_count();
2208 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2209 _total_compiled_calls += count;
2210 _arity_histogram[arity] += count;
2211 _size_histogram[argsize] += count;
2212 _max_arity = MAX2(_max_arity, arity);
2213 _max_size = MAX2(_max_size, argsize);
2214 }
2215 }
2216
2217 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2218 const int N = MIN2(9, n);
2219 double sum = 0;
2220 double weighted_sum = 0;
2221 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2222 if (sum >= 1) { // prevent divide by zero or divide overflow
2223 double rest = sum;
2224 double percent = sum / 100;
2225 for (int i = 0; i <= N; i++) {
2226 rest -= (double)histo[i];
2227 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2228 }
2229 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2230 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2231 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2232 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2233 } else {
2234 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2235 }
2236 }
2237
2238 void print_histogram() {
2239 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2240 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2241 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2242 print_histogram_helper(_max_size, _size_histogram, "size");
2243 tty->cr();
2244 }
2245
2246 public:
2247 MethodArityHistogram() {
2248 // Take the Compile_lock to protect against changes in the CodeBlob structures
2249 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2250 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2251 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2252 _max_arity = _max_size = 0;
2253 _total_compiled_calls = 0;
2254 _max_compiled_calls_per_method = 0;
2255 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2256 CodeCache::nmethods_do(add_method_to_histogram);
2257 print_histogram();
2258 }
2259 };
2260
2261 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2262 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2263 uint64_t MethodArityHistogram::_total_compiled_calls;
2264 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2265 int MethodArityHistogram::_max_arity;
2266 int MethodArityHistogram::_max_size;
2267
2268 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2269 tty->print_cr("Calls from compiled code:");
2270 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2271 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2272 int64_t mono_i = _nof_interface_calls;
2273 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2274 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2275 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2276 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2277 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2278 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2279 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2280 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2281 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2282 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2283 tty->cr();
2284 tty->print_cr("Note 1: counter updates are not MT-safe.");
2285 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2286 tty->print_cr(" %% in nested categories are relative to their category");
2287 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2288 tty->cr();
2289
2290 MethodArityHistogram h;
2291 }
2292 #endif
2293
2294 #ifndef PRODUCT
2295 static int _lookups; // number of calls to lookup
2296 static int _equals; // number of buckets checked with matching hash
2297 static int _archived_hits; // number of successful lookups in archived table
2298 static int _runtime_hits; // number of successful lookups in runtime table
2299 #endif
2300
2301 // A simple wrapper class around the calling convention information
2302 // that allows sharing of adapters for the same calling convention.
2303 class AdapterFingerPrint : public MetaspaceObj {
2304 public:
2305 class Element {
2306 private:
2307 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2308 // field if it is flattened in the calling convention, -1 otherwise.
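    // Illustrative example: Element(T_INT, -1) stores T_INT in the high byte and
    // all ones in the low 24 bits, which offset() decodes back to -1; non-negative
    // offsets are stored and returned unchanged.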
2309 juint _payload;
2310
2311 static constexpr int offset_bit_width = 24;
2312 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2313 public:
2314 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2315 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2316 }
2317
2318 BasicType bt() const {
2319 return static_cast<BasicType>(_payload >> offset_bit_width);
2320 }
2321
2322 int offset() const {
2323 juint res = _payload & offset_bit_mask;
2324 return res == offset_bit_mask ? -1 : res;
2325 }
2326
2327 juint hash() const {
2328 return _payload;
2329 }
2330
2331 bool operator!=(const Element& other) const {
2332 return _payload != other._payload;
2333 }
2334 };
2335
2336 private:
2337 const bool _has_ro_adapter;
2338 const int _length;
2339
2340 static int data_offset() { return sizeof(AdapterFingerPrint); }
2341 Element* data_pointer() {
2342 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2343 }
2344
2345 const Element& element_at(int index) {
2346 assert(index < length(), "index %d out of bounds for length %d", index, length());
2347 Element* data = data_pointer();
2348 return data[index];
2349 }
2350
2351   // Private constructor. Use allocate() to get an instance.
2352 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2353 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2354 Element* data = data_pointer();
2355 BasicType prev_bt = T_ILLEGAL;
2356 int vt_count = 0;
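    // In the extended (scalarized) signature, an inline-type argument is bracketed by
    // a T_METADATA marker at its start and a terminating T_VOID (one that does not
    // follow a T_LONG or T_DOUBLE) at its end, with its field types in between. For
    // example, an inline type with two int fields would appear as
    // T_METADATA, T_INT, T_INT, T_VOID (illustrative layout only).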
2357 for (int index = 0; index < _length; index++) {
2358 const SigEntry& sig_entry = sig->at(index);
2359 BasicType bt = sig_entry._bt;
2360 if (bt == T_METADATA) {
2361 // Found start of inline type in signature
2362 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2363 vt_count++;
2364 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2365 // Found end of inline type in signature
2366 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2367 vt_count--;
2368 assert(vt_count >= 0, "invalid vt_count");
2369 } else if (vt_count == 0) {
2370 // Widen fields that are not part of a scalarized inline type argument
2371 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2372 bt = adapter_encoding(bt);
2373 }
2374
2375 ::new(&data[index]) Element(bt, sig_entry._offset);
2376 prev_bt = bt;
2377 }
2378 assert(vt_count == 0, "invalid vt_count");
2379 }
2380
2381 // Call deallocate instead
2382 ~AdapterFingerPrint() {
2383 ShouldNotCallThis();
2384 }
2385
2386 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2387 return (sig != nullptr) ? sig->length() : 0;
2388 }
2389
2390 static int compute_size_in_words(int len) {
2391 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2392 }
2393
2394 // Remap BasicTypes that are handled equivalently by the adapters.
2395 // These are correct for the current system but someday it might be
2396 // necessary to make this mapping platform dependent.
2397 static BasicType adapter_encoding(BasicType in) {
2398 switch (in) {
2399 case T_BOOLEAN:
2400 case T_BYTE:
2401 case T_SHORT:
2402 case T_CHAR:
2403 // They are all promoted to T_INT in the calling convention
2404 return T_INT;
2405
2406 case T_OBJECT:
2407 case T_ARRAY:
2408 // In other words, we assume that any register good enough for
2409 // an int or long is good enough for a managed pointer.
2410 #ifdef _LP64
2411 return T_LONG;
2412 #else
2413 return T_INT;
2414 #endif
2415
2416 case T_INT:
2417 case T_LONG:
2418 case T_FLOAT:
2419 case T_DOUBLE:
2420 case T_VOID:
2421 return in;
2422
2423 default:
2424 ShouldNotReachHere();
2425 return T_CONFLICT;
2426 }
2427 }
2428
2429 void* operator new(size_t size, size_t fp_size) throw() {
2430 assert(fp_size >= size, "sanity check");
2431 void* p = AllocateHeap(fp_size, mtCode);
2432 memset(p, 0, fp_size);
2433 return p;
2434 }
2435
2436 public:
2437 template<typename Function>
2438 void iterate_args(Function function) {
2439 for (int i = 0; i < length(); i++) {
2440 function(element_at(i));
2441 }
2442 }
2443
2444 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2445 int len = total_args_passed_in_sig(sig);
2446 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2447 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2448 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2449 return afp;
2450 }
2451
2452 static void deallocate(AdapterFingerPrint* fp) {
2453 FreeHeap(fp);
2454 }
2455
2456 bool has_ro_adapter() const {
2457 return _has_ro_adapter;
2458 }
2459
2460 int length() const {
2461 return _length;
2462 }
2463
2464 unsigned int compute_hash() {
2465 int hash = 0;
2466 for (int i = 0; i < length(); i++) {
2467 const Element& v = element_at(i);
2468       // Add an arithmetic operation to the hash, such as +3, to improve hashing
2469 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2470 }
2471 return (unsigned int)hash;
2472 }
2473
2474 const char* as_string() {
2475 stringStream st;
2476 st.print("{");
2477 if (_has_ro_adapter) {
2478 st.print("has_ro_adapter");
2479 } else {
2480 st.print("no_ro_adapter");
2481 }
2482 for (int i = 0; i < length(); i++) {
2483 st.print(", ");
2484 const Element& elem = element_at(i);
2485 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2486 }
2487 st.print("}");
2488 return st.as_string();
2489 }
2490
2491 const char* as_basic_args_string() {
2492 stringStream st;
2493 bool long_prev = false;
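    // A T_LONG immediately followed by T_VOID is a real long ("J"); a T_LONG with no
    // trailing T_VOID is a reference that adapter_encoding() widened to T_LONG on
    // 64-bit platforms, so it prints as "L".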
2494 iterate_args([&] (const Element& arg) {
2495 if (long_prev) {
2496 long_prev = false;
2497 if (arg.bt() == T_VOID) {
2498 st.print("J");
2499 } else {
2500 st.print("L");
2501 }
2502 }
2503 if (arg.bt() == T_LONG) {
2504 long_prev = true;
2505 } else if (arg.bt() != T_VOID) {
2506 st.print("%c", type2char(arg.bt()));
2507 }
2508 });
2509 if (long_prev) {
2510 st.print("L");
2511 }
2512 return st.as_string();
2513 }
2514
2515 bool equals(AdapterFingerPrint* other) {
2516 if (other->_has_ro_adapter != _has_ro_adapter) {
2517 return false;
2518 } else if (other->_length != _length) {
2519 return false;
2520 } else {
2521 for (int i = 0; i < _length; i++) {
2522 if (element_at(i) != other->element_at(i)) {
2523 return false;
2524 }
2525 }
2526 }
2527 return true;
2528 }
2529
2530 // methods required by virtue of being a MetaspaceObj
2531 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2532 int size() const { return compute_size_in_words(_length); }
2533 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2534
2535 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2536 NOT_PRODUCT(_equals++);
2537 return fp1->equals(fp2);
2538 }
2539
2540 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2541 return fp->compute_hash();
2542 }
2543 };
2544
2545 #if INCLUDE_CDS
2546 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2547 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2548 }
2549
2550 class ArchivedAdapterTable : public OffsetCompactHashtable<
2551 AdapterFingerPrint*,
2552 AdapterHandlerEntry*,
2553 adapter_fp_equals_compact_hashtable_entry> {};
2554 #endif // INCLUDE_CDS
2555
2556 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2557 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2558 AnyObj::C_HEAP, mtCode,
2559 AdapterFingerPrint::compute_hash,
2560 AdapterFingerPrint::equals>;
2561 static AdapterHandlerTable* _adapter_handler_table;
2562 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2563
2564 // Find an entry with the same fingerprint, if it exists
2565 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2566 NOT_PRODUCT(_lookups++);
2567 assert_lock_strong(AdapterHandlerLibrary_lock);
2568 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2569 AdapterHandlerEntry* entry = nullptr;
2570 #if INCLUDE_CDS
2571   // If we are building the archive, then the archived adapter table is
2572   // not yet valid and we need to use the entries added to the runtime table.
2573 if (AOTCodeCache::is_using_adapter()) {
2574     // Search the archived table first. It is a read-only table, so it can be searched without a lock.
2575 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2576 #ifndef PRODUCT
2577 if (entry != nullptr) {
2578 _archived_hits++;
2579 }
2580 #endif
2581 }
2582 #endif // INCLUDE_CDS
2583 if (entry == nullptr) {
2584 assert_lock_strong(AdapterHandlerLibrary_lock);
2585 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2586 if (entry_p != nullptr) {
2587 entry = *entry_p;
2588 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2589 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2590 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2591 #ifndef PRODUCT
2592 _runtime_hits++;
2593 #endif
2594 }
2595 }
2596 AdapterFingerPrint::deallocate(fp);
2597 return entry;
2598 }
2599
2600 #ifndef PRODUCT
2601 static void print_table_statistics() {
2602 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2603 return sizeof(*key) + sizeof(*a);
2604 };
2605 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2606 ts.print(tty, "AdapterHandlerTable");
2607 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2608 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2609 int total_hits = _archived_hits + _runtime_hits;
2610 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2611 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2612 }
2613 #endif
2614
2615 // ---------------------------------------------------------------------------
2616 // Implementation of AdapterHandlerLibrary
2617 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2618 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2619 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2620 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2621 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2622 #if INCLUDE_CDS
2623 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2624 #endif // INCLUDE_CDS
2625 static const int AdapterHandlerLibrary_size = 48*K;
2626 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2627 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2628
2629 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2630 assert(_buffer != nullptr, "should be initialized");
2631 return _buffer;
2632 }
2633
2634 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2635 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2636 AdapterBlob* adapter_blob = entry->adapter_blob();
2637 char blob_id[256];
2638 jio_snprintf(blob_id,
2639 sizeof(blob_id),
2640 "%s(%s)",
2641 adapter_blob->name(),
2642 entry->fingerprint()->as_string());
2643 if (Forte::is_enabled()) {
2644 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2645 }
2646
2647 if (JvmtiExport::should_post_dynamic_code_generated()) {
2648 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2649 }
2650 }
2651 }
2652
2653 void AdapterHandlerLibrary::initialize() {
2654 {
2655 ResourceMark rm;
2656 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2657 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2658 }
2659
2660 #if INCLUDE_CDS
2661 // Link adapters in AOT Cache to their code in AOT Code Cache
2662 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2663 link_aot_adapters();
2664 lookup_simple_adapters();
2665 return;
2666 }
2667 #endif // INCLUDE_CDS
2668
2669 ResourceMark rm;
2670 {
2671 MutexLocker mu(AdapterHandlerLibrary_lock);
2672
2673 CompiledEntrySignature no_args;
2674 no_args.compute_calling_conventions();
2675 _no_arg_handler = create_adapter(no_args, true);
2676
2677 CompiledEntrySignature obj_args;
2678 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2679 obj_args.compute_calling_conventions();
2680 _obj_arg_handler = create_adapter(obj_args, true);
2681
2682 CompiledEntrySignature int_args;
2683 SigEntry::add_entry(int_args.sig(), T_INT);
2684 int_args.compute_calling_conventions();
2685 _int_arg_handler = create_adapter(int_args, true);
2686
2687 CompiledEntrySignature obj_int_args;
2688 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2689 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2690 obj_int_args.compute_calling_conventions();
2691 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2692
2693 CompiledEntrySignature obj_obj_args;
2694 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2695 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2696 obj_obj_args.compute_calling_conventions();
2697 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2698
2699     // We should always get an entry back, but there is no
2700     // associated blob on Zero.
2701 assert(_no_arg_handler != nullptr &&
2702 _obj_arg_handler != nullptr &&
2703 _int_arg_handler != nullptr &&
2704 _obj_int_arg_handler != nullptr &&
2705 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2706 }
2707
2708 // Outside of the lock
2709 #ifndef ZERO
2710 // no blobs to register when we are on Zero
2711 post_adapter_creation(_no_arg_handler);
2712 post_adapter_creation(_obj_arg_handler);
2713 post_adapter_creation(_int_arg_handler);
2714 post_adapter_creation(_obj_int_arg_handler);
2715 post_adapter_creation(_obj_obj_arg_handler);
2716 #endif // ZERO
2717 }
2718
2719 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2720 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2721 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2722 return AdapterHandlerEntry::allocate(id, fingerprint);
2723 }
2724
2725 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2726 int total_args_passed = method->size_of_parameters(); // All args on stack
2727 if (total_args_passed == 0) {
2728 return _no_arg_handler;
2729 } else if (total_args_passed == 1) {
2730 if (!method->is_static()) {
2731 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2732 return nullptr;
2733 }
2734 return _obj_arg_handler;
2735 }
2736 switch (method->signature()->char_at(1)) {
2737 case JVM_SIGNATURE_CLASS: {
2738 if (InlineTypePassFieldsAsArgs) {
2739 SignatureStream ss(method->signature());
2740 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2741 if (vk != nullptr) {
2742 return nullptr;
2743 }
2744 }
2745 return _obj_arg_handler;
2746 }
2747 case JVM_SIGNATURE_ARRAY:
2748 return _obj_arg_handler;
2749 case JVM_SIGNATURE_INT:
2750 case JVM_SIGNATURE_BOOLEAN:
2751 case JVM_SIGNATURE_CHAR:
2752 case JVM_SIGNATURE_BYTE:
2753 case JVM_SIGNATURE_SHORT:
2754 return _int_arg_handler;
2755 }
2756 } else if (total_args_passed == 2 &&
2757 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2758 switch (method->signature()->char_at(1)) {
2759 case JVM_SIGNATURE_CLASS: {
2760 if (InlineTypePassFieldsAsArgs) {
2761 SignatureStream ss(method->signature());
2762 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2763 if (vk != nullptr) {
2764 return nullptr;
2765 }
2766 }
2767 return _obj_obj_arg_handler;
2768 }
2769 case JVM_SIGNATURE_ARRAY:
2770 return _obj_obj_arg_handler;
2771 case JVM_SIGNATURE_INT:
2772 case JVM_SIGNATURE_BOOLEAN:
2773 case JVM_SIGNATURE_CHAR:
2774 case JVM_SIGNATURE_BYTE:
2775 case JVM_SIGNATURE_SHORT:
2776 return _obj_int_arg_handler;
2777 }
2778 }
2779 return nullptr;
2780 }
2781
2782 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2783 _method(method), _num_inline_args(0), _has_inline_recv(false),
2784 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2785 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2786 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2787 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2788 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2789 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2790 }
2791
2792 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2793 // or the same entry for VEP and VIEP(RO).
2794 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2795 if (!has_scalarized_args()) {
2796 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2797 return CodeOffsets::Verified_Entry;
2798 }
2799 if (_method->is_static()) {
2800 // Static methods don't need VIEP(RO)
2801 return CodeOffsets::Verified_Entry;
2802 }
2803
2804 if (has_inline_recv()) {
2805 if (num_inline_args() == 1) {
2806 // Share same entry for VIEP and VIEP(RO).
2807 // This is quite common: we have an instance method in an InlineKlass that has
2808 // no inline type args other than <this>.
2809 return CodeOffsets::Verified_Inline_Entry;
2810 } else {
2811 assert(num_inline_args() > 1, "must be");
2812 // No sharing:
2813 // VIEP(RO) -- <this> is passed as object
2814 // VEP -- <this> is passed as fields
2815 return CodeOffsets::Verified_Inline_Entry_RO;
2816 }
2817 }
2818
2819 // Either a static method, or <this> is not an inline type
2820 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2821 // No sharing:
2822 // Some arguments are passed on the stack, and we have inserted reserved entries
2823 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2824 return CodeOffsets::Verified_Inline_Entry_RO;
2825 } else {
2826 // Share same entry for VEP and VIEP(RO).
2827 return CodeOffsets::Verified_Entry;
2828 }
2829 }
2830
2831 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2832 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2833 if (_supers != nullptr) {
2834 return _supers;
2835 }
2836 _supers = new GrowableArray<Method*>();
2837 // Skip private, static, and <init> methods
2838 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2839 return _supers;
2840 }
2841 Symbol* name = _method->name();
2842 Symbol* signature = _method->signature();
2843 const Klass* holder = _method->method_holder()->super();
2844 Symbol* holder_name = holder->name();
2845 ThreadInVMfromUnknown tiv;
2846 JavaThread* current = JavaThread::current();
2847 HandleMark hm(current);
2848 Handle loader(current, _method->method_holder()->class_loader());
2849
2850 // Walk up the class hierarchy and search for super methods
2851 while (holder != nullptr) {
2852 Method* super_method = holder->lookup_method(name, signature);
2853 if (super_method == nullptr) {
2854 break;
2855 }
2856 if (!super_method->is_static() && !super_method->is_private() &&
2857 (!super_method->is_package_private() ||
2858 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2859 _supers->push(super_method);
2860 }
2861 holder = super_method->method_holder()->super();
2862 }
2863 // Search interfaces for super methods
2864 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2865 for (int i = 0; i < interfaces->length(); ++i) {
2866 Method* m = interfaces->at(i)->lookup_method(name, signature);
2867 if (m != nullptr && !m->is_static() && m->is_public()) {
2868 _supers->push(m);
2869 }
2870 }
2871 return _supers;
2872 }
2873
2874 // Iterate over arguments and compute scalarized and non-scalarized signatures
2875 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2876 bool has_scalarized = false;
2877 if (_method != nullptr) {
2878 InstanceKlass* holder = _method->method_holder();
2879 int arg_num = 0;
2880 if (!_method->is_static()) {
2881 // We shouldn't scalarize 'this' in a value class constructor
2882 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2883 (init || _method->is_scalarized_arg(arg_num))) {
2884 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2885 has_scalarized = true;
2886 _has_inline_recv = true;
2887 _num_inline_args++;
2888 } else {
2889 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2890 }
2891 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2892 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2893 arg_num++;
2894 }
2895 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2896 BasicType bt = ss.type();
2897 if (bt == T_OBJECT) {
2898 InlineKlass* vk = ss.as_inline_klass(holder);
2899 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2900 // Check for a calling convention mismatch with super method(s)
2901 bool scalar_super = false;
2902 bool non_scalar_super = false;
2903 GrowableArray<Method*>* supers = get_supers();
2904 for (int i = 0; i < supers->length(); ++i) {
2905 Method* super_method = supers->at(i);
2906 if (super_method->is_scalarized_arg(arg_num)) {
2907 scalar_super = true;
2908 } else {
2909 non_scalar_super = true;
2910 }
2911 }
2912 #ifdef ASSERT
// Randomly enable the code paths below for stress testing
2914 bool stress = init && StressCallingConvention;
2915 if (stress && (os::random() & 1) == 1) {
2916 non_scalar_super = true;
2917 if ((os::random() & 1) == 1) {
2918 scalar_super = true;
2919 }
2920 }
2921 #endif
2922 if (non_scalar_super) {
2923 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2924 if (scalar_super) {
2925 // Found non-scalar *and* scalar super methods. We can't handle both.
// Mark the scalarized super method as mismatched and recompile call sites to use the non-scalarized calling convention.
2927 for (int i = 0; i < supers->length(); ++i) {
2928 Method* super_method = supers->at(i);
2929 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2930 super_method->set_mismatch();
2931 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2932 JavaThread* thread = JavaThread::current();
2933 HandleMark hm(thread);
2934 methodHandle mh(thread, super_method);
2935 DeoptimizationScope deopt_scope;
2936 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2937 deopt_scope.deoptimize_marked();
2938 }
2939 }
2940 }
2941 // Fall back to non-scalarized calling convention
2942 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2943 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2944 } else {
2945 _num_inline_args++;
2946 has_scalarized = true;
2947 int last = _sig_cc->length();
2948 int last_ro = _sig_cc_ro->length();
2949 _sig_cc->appendAll(vk->extended_sig());
2950 _sig_cc_ro->appendAll(vk->extended_sig());
2951 if (bt == T_OBJECT) {
2952 // Nullable inline type argument, insert InlineTypeNode::NullMarker field right after T_METADATA delimiter
2953 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2954 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2955 }
2956 }
2957 } else {
2958 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2959 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2960 }
2961 bt = T_OBJECT;
2962 } else {
2963 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2964 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2965 }
2966 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2967 if (bt != T_VOID) {
2968 arg_num++;
2969 }
2970 }
2971 }
2972
2973 // Compute the non-scalarized calling convention
2974 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
2975 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
2976
2977 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
2978 if (has_scalarized && !_method->is_native()) {
2979 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
2980 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
2981
2982 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
2983 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
2984
2985 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
2986 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
2987
2988 // Upper bound on stack arguments to avoid hitting the argument limit and
2989 // bailing out of compilation ("unsupported incoming calling sequence").
2990 // TODO we need a reasonable limit (flag?) here
2991 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
2992 return; // Success
2993 }
2994 }
2995
2996 // No scalarized args
2997 _sig_cc = _sig;
2998 _regs_cc = _regs;
2999 _args_on_stack_cc = _args_on_stack;
3000
3001 _sig_cc_ro = _sig;
3002 _regs_cc_ro = _regs;
3003 _args_on_stack_cc_ro = _args_on_stack;
3004 }
3005
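// Reconstruct the signatures and calling conventions from a (possibly archived)
// AdapterFingerPrint, for example when linking adapters loaded from the AOT cache.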
3006 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3007 _has_inline_recv = fingerprint->has_ro_adapter();
3008
3009 int value_object_count = 0;
3010 BasicType prev_bt = T_ILLEGAL;
3011 bool has_scalarized_arguments = false;
3012 bool long_prev = false;
3013 int long_prev_offset = -1;
3014
3015 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3016 BasicType bt = arg.bt();
3017 int offset = arg.offset();
3018
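// A T_LONG element in the fingerprint may stand for either a real long (which is
// always followed by a T_VOID slot) or an object reference (which is not). We
// therefore defer it (long_prev) and disambiguate here based on the current element.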
3019 if (long_prev) {
3020 long_prev = false;
3021 BasicType bt_to_add;
3022 if (bt == T_VOID) {
3023 bt_to_add = T_LONG;
3024 } else {
3025 bt_to_add = T_OBJECT;
3026 }
3027 if (value_object_count == 0) {
3028 SigEntry::add_entry(_sig, bt_to_add);
3029 }
3030 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3031 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3032 }
3033
3034 switch (bt) {
3035 case T_VOID:
3036 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3037 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3038 value_object_count--;
3039 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3040 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3041 assert(value_object_count >= 0, "invalid value object count");
3042 } else {
// Nothing to add for _sig: add_entry() already appended an additional T_VOID when the T_LONG or T_DOUBLE was added.
3044 }
3045 break;
3046 case T_INT:
3047 case T_FLOAT:
3048 case T_DOUBLE:
3049 if (value_object_count == 0) {
3050 SigEntry::add_entry(_sig, bt);
3051 }
3052 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3053 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3054 break;
3055 case T_LONG:
3056 long_prev = true;
3057 long_prev_offset = offset;
3058 break;
3059 case T_BOOLEAN:
3060 case T_CHAR:
3061 case T_BYTE:
3062 case T_SHORT:
3063 case T_OBJECT:
3064 case T_ARRAY:
3065 assert(value_object_count > 0, "must be value object field");
3066 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3067 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3068 break;
3069 case T_METADATA:
3070 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3071 if (value_object_count == 0) {
3072 SigEntry::add_entry(_sig, T_OBJECT);
3073 }
3074 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3075 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3076 value_object_count++;
3077 has_scalarized_arguments = true;
3078 break;
3079 default: {
3080 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3081 }
3082 }
3083 prev_bt = bt;
3084 });
3085
3086 if (long_prev) {
3087 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3088 SigEntry::add_entry(_sig, T_OBJECT);
3089 SigEntry::add_entry(_sig_cc, T_OBJECT);
3090 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3091 }
3092 assert(value_object_count == 0, "invalid value object count");
3093
3094 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3095 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3096
3097 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3098 if (has_scalarized_arguments) {
3099 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3100 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3101
3102 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3103 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3104
3105 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3106 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3107 } else {
3108 // No scalarized args
3109 _sig_cc = _sig;
3110 _regs_cc = _regs;
3111 _args_on_stack_cc = _args_on_stack;
3112
3113 _sig_cc_ro = _sig;
3114 _regs_cc_ro = _regs;
3115 _args_on_stack_cc_ro = _args_on_stack;
3116 }
3117
3118 #ifdef ASSERT
3119 {
3120 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3121 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3122 AdapterFingerPrint::deallocate(compare_fp);
3123 }
3124 #endif
3125 }
3126
3127 const char* AdapterHandlerEntry::_entry_names[] = {
3128 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3129 };
3130
3131 #ifdef ASSERT
3132 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
// We can only compare the generated code if there is any (none is generated on Zero)
3134 #ifndef ZERO
3135 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3136 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3137 assert(comparison_entry->compare_code(cached_entry), "code must match");
3138 // Release the one just created
3139 AdapterHandlerEntry::deallocate(comparison_entry);
#endif // ZERO
}
#endif /* ASSERT */
3143
3144 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3145 assert(!method->is_abstract(), "abstract methods do not have adapters");
3146 // Use customized signature handler. Need to lock around updates to
3147 // the _adapter_handler_table (it is not safe for concurrent readers
3148 // and a single writer: this could be fixed if it becomes a
3149 // problem).
3150
3151 // Fast-path for trivial adapters
3152 AdapterHandlerEntry* entry = get_simple_adapter(method);
3153 if (entry != nullptr) {
3154 return entry;
3155 }
3156
3157 ResourceMark rm;
3158 bool new_entry = false;
3159
3160 CompiledEntrySignature ces(method());
3161 ces.compute_calling_conventions();
3162 if (ces.has_scalarized_args()) {
3163 if (!method->has_scalarized_args()) {
3164 method->set_has_scalarized_args();
3165 }
3166 if (ces.c1_needs_stack_repair()) {
3167 method->set_c1_needs_stack_repair();
3168 }
3169 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3170 method->set_c2_needs_stack_repair();
3171 }
3172 }
3173
3174 {
3175 MutexLocker mu(AdapterHandlerLibrary_lock);
3176
3177 // Lookup method signature's fingerprint
3178 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3179
3180 if (entry != nullptr) {
3181 #ifndef ZERO
3182 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3183 #endif
3184 #ifdef ASSERT
3185 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3186 verify_adapter_sharing(ces, entry);
3187 }
3188 #endif
3189 } else {
3190 entry = create_adapter(ces, /* allocate_code_blob */ true);
3191 if (entry != nullptr) {
3192 new_entry = true;
3193 }
3194 }
3195 }
3196
3197 // Outside of the lock
3198 if (new_entry) {
3199 post_adapter_creation(entry);
3200 }
3201 return entry;
3202 }
3203
3204 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3205 ResourceMark rm;
3206 const char* name = AdapterHandlerLibrary::name(handler);
3207 const uint32_t id = AdapterHandlerLibrary::id(handler);
3208
3209 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3210 if (blob != nullptr) {
3211 handler->set_adapter_blob(blob->as_adapter_blob());
3212 }
3213 }
3214
3215 #ifndef PRODUCT
3216 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
3217 ttyLocker ttyl;
3218 ResourceMark rm;
3219 int insts_size;
// On Zero the blob may be null
3221 handler->print_adapter_on(tty);
3222 AdapterBlob* adapter_blob = handler->adapter_blob();
3223 if (adapter_blob == nullptr) {
3224 return;
3225 }
3226 insts_size = adapter_blob->code_size();
3227 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3228 handler->fingerprint()->as_basic_args_string(),
3229 handler->fingerprint()->as_string(), insts_size);
3230 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3231 if (Verbose || PrintStubCode) {
3232 address first_pc = adapter_blob->content_begin();
3233 if (first_pc != nullptr) {
3234 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3235 st->cr();
3236 }
3237 }
3238 }
3239 #endif // PRODUCT
3240
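// Convert the absolute entry addresses produced by the adapter generator into
// offsets relative to the i2c entry. A missing c2i_no_clinit_check entry is
// encoded as -1.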
3241 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3242 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3243 entry_offset[AdapterBlob::I2C] = 0;
3244 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3245 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3246 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3247 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3248 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3249 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3250 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3251 } else {
3252 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3253 }
3254 }
3255
3256 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3257 CompiledEntrySignature& ces,
3258 bool allocate_code_blob,
3259 bool is_transient) {
3260 if (log_is_enabled(Info, perf, class, link)) {
3261 ClassLoader::perf_method_adapters_count()->inc();
3262 }
3263
3264 #ifndef ZERO
3265 AdapterBlob* adapter_blob = nullptr;
3266 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3267 CodeBuffer buffer(buf);
3268 short buffer_locs[20];
3269 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3270 sizeof(buffer_locs)/sizeof(relocInfo));
3271 MacroAssembler masm(&buffer);
3272 address entry_address[AdapterBlob::ENTRY_COUNT];
3273
3274 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
3275 SharedRuntime::generate_i2c2i_adapters(&masm,
3276 ces.args_on_stack(),
3277 ces.sig(),
3278 ces.regs(),
3279 ces.sig_cc(),
3280 ces.regs_cc(),
3281 ces.sig_cc_ro(),
3282 ces.regs_cc_ro(),
3283 entry_address,
3284 adapter_blob,
3285 allocate_code_blob);
3286
3287 if (ces.has_scalarized_args()) {
3288 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3289 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3290 heap_sig->appendAll(ces.sig_cc());
3291 handler->set_sig_cc(heap_sig);
3292 }
// On Zero there is no code to save and no need to create a blob
// or relocate the handler.
3295 int entry_offset[AdapterBlob::ENTRY_COUNT];
3296 address_to_offset(entry_address, entry_offset);
3297 #ifdef ASSERT
3298 if (VerifyAdapterSharing) {
3299 handler->save_code(buf->code_begin(), buffer.insts_size());
3300 if (is_transient) {
3301 return true;
3302 }
3303 }
3304 #endif
3305 if (adapter_blob == nullptr) {
3306 // CodeCache is full, disable compilation
// Ought to log this, but the compile log is only per compile thread
// and we're some nondescript Java thread.
3309 return false;
3310 }
3311 handler->set_adapter_blob(adapter_blob);
3312 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3313 // try to save generated code
3314 const char* name = AdapterHandlerLibrary::name(handler);
3315 const uint32_t id = AdapterHandlerLibrary::id(handler);
3316 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3317 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3318 }
3319 #endif // ZERO
3320
3321 #ifndef PRODUCT
3322 // debugging support
3323 if (PrintAdapterHandlers || PrintStubCode) {
3324 print_adapter_handler_info(tty, handler);
3325 }
3326 #endif
3327
3328 return true;
3329 }
3330
3331 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3332 bool allocate_code_blob,
3333 bool is_transient) {
3334 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3335 #ifdef ASSERT
3336 // Verify that we can successfully restore the compiled entry signature object.
3337 CompiledEntrySignature ces_verify;
3338 ces_verify.initialize_from_fingerprint(fp);
3339 #endif
3340 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3341 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3342 AdapterHandlerEntry::deallocate(handler);
3343 return nullptr;
3344 }
3345 if (!is_transient) {
3346 assert_lock_strong(AdapterHandlerLibrary_lock);
3347 _adapter_handler_table->put(fp, handler);
3348 }
3349 return handler;
3350 }
3351
3352 #if INCLUDE_CDS
3353 void AdapterHandlerEntry::remove_unshareable_info() {
3354 #ifdef ASSERT
3355 _saved_code = nullptr;
3356 _saved_code_length = 0;
3357 #endif // ASSERT
3358 _adapter_blob = nullptr;
3359 _linked = false;
3360 }
3361
3362 class CopyAdapterTableToArchive : StackObj {
3363 private:
3364 CompactHashtableWriter* _writer;
3365 ArchiveBuilder* _builder;
3366 public:
3367 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
3368 _builder(ArchiveBuilder::current())
3369 {}
3370
3371 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
3372 LogStreamHandle(Trace, aot) lsh;
3373 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3374 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3375 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3376 assert(buffered_fp != nullptr,"sanity check");
3377 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3378 assert(buffered_entry != nullptr,"sanity check");
3379
3380 uint hash = fp->compute_hash();
3381 u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
3382 _writer->add(hash, delta);
3383 if (lsh.is_enabled()) {
3384 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
3385 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
log_trace(aot)("Added fp=%p (%s), entry=%p to the archived adapter table", buffered_fp, buffered_fp->as_basic_args_string(), buffered_entry);
3387 }
3388 } else {
3389 if (lsh.is_enabled()) {
3390 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
3391 }
3392 }
3393 return true;
3394 }
3395 };
3396
3397 void AdapterHandlerLibrary::dump_aot_adapter_table() {
3398 CompactHashtableStats stats;
3399 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
3400 CopyAdapterTableToArchive copy(&writer);
3401 _adapter_handler_table->iterate(©);
3402 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
3403 }
3404
3405 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
3406 _aot_adapter_handler_table.serialize_header(soc);
3407 }
3408
3409 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
3410 #ifdef ASSERT
3411 if (TestAOTAdapterLinkFailure) {
3412 return;
3413 }
3414 #endif
3415 lookup_aot_cache(handler);
3416 #ifndef PRODUCT
3417 // debugging support
3418 if (PrintAdapterHandlers || PrintStubCode) {
3419 print_adapter_handler_info(tty, handler);
3420 }
3421 #endif
3422 }
3423
// This method is used during a production run to link archived adapters (stored in the AOT cache)
// to their code in the AOT code cache
3426 void AdapterHandlerEntry::link() {
3427 ResourceMark rm;
3428 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3429 bool generate_code = false;
3430 // Generate code only if AOTCodeCache is not available, or
3431 // caching adapters is disabled, or we fail to link
3432 // the AdapterHandlerEntry to its code in the AOTCodeCache
3433 if (AOTCodeCache::is_using_adapter()) {
3434 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3435 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3436 if (_adapter_blob == nullptr) {
3437 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3438 generate_code = true;
3439 }
3440 } else {
3441 generate_code = true;
3442 }
3443 if (generate_code) {
3444 CompiledEntrySignature ces;
3445 ces.initialize_from_fingerprint(_fingerprint);
3446 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3447 // Don't throw exceptions during VM initialization because java.lang.* classes
3448 // might not have been initialized, causing problems when constructing the
3449 // Java exception object.
3450 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3451 }
3452 }
3453 if (_adapter_blob != nullptr) {
3454 post_adapter_creation(this);
3455 }
3456 assert(_linked, "AdapterHandlerEntry must now be linked");
3457 }
3458
3459 void AdapterHandlerLibrary::link_aot_adapters() {
3460 uint max_id = 0;
3461 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
/* It is possible that some adapters generated in the assembly phase are not stored in the cache.
* That implies the adapter ids of the adapters in the cache may not be contiguous.
* If the size of the _aot_adapter_handler_table were used to initialize _id_counter, adapter ids
* could collide between AOT-stored handlers and runtime-generated handlers.
* To avoid that, initialize _id_counter with the largest adapter id among the AOT-stored handlers.
*/
3468 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3469 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3470 entry->link();
3471 max_id = MAX2(max_id, entry->id());
3472 });
// Initialize the adapter id counter to the maximum id found in the AOT cache
3474 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3475 _id_counter = max_id;
3476 }
3477
// This method is called during a production run to look up simple adapters
// in the archived adapter handler table
3480 void AdapterHandlerLibrary::lookup_simple_adapters() {
3481 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3482
3483 MutexLocker mu(AdapterHandlerLibrary_lock);
3484 ResourceMark rm;
3485 CompiledEntrySignature no_args;
3486 no_args.compute_calling_conventions();
3487 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3488
3489 CompiledEntrySignature obj_args;
3490 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3491 obj_args.compute_calling_conventions();
3492 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3493
3494 CompiledEntrySignature int_args;
3495 SigEntry::add_entry(int_args.sig(), T_INT);
3496 int_args.compute_calling_conventions();
3497 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3498
3499 CompiledEntrySignature obj_int_args;
3500 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3501 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3502 obj_int_args.compute_calling_conventions();
3503 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3504
3505 CompiledEntrySignature obj_obj_args;
3506 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3507 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3508 obj_obj_args.compute_calling_conventions();
3509 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3510
3511 assert(_no_arg_handler != nullptr &&
3512 _obj_arg_handler != nullptr &&
3513 _int_arg_handler != nullptr &&
3514 _obj_int_arg_handler != nullptr &&
3515 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3516 assert(_no_arg_handler->is_linked() &&
3517 _obj_arg_handler->is_linked() &&
3518 _int_arg_handler->is_linked() &&
3519 _obj_int_arg_handler->is_linked() &&
3520 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3521 }
3522 #endif // INCLUDE_CDS
3523
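// Visit metaspace pointers embedded in this entry (currently just the fingerprint),
// e.g. so the ArchiveBuilder can follow and relocate them when archiving adapters.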
3524 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3525 LogStreamHandle(Trace, aot) lsh;
3526 if (lsh.is_enabled()) {
3527 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3528 lsh.cr();
3529 }
3530 it->push(&_fingerprint);
3531 }
3532
3533 AdapterHandlerEntry::~AdapterHandlerEntry() {
3534 if (_fingerprint != nullptr) {
3535 AdapterFingerPrint::deallocate(_fingerprint);
3536 _fingerprint = nullptr;
3537 }
3538 if (_sig_cc != nullptr) {
3539 delete _sig_cc;
3540 }
3541 #ifdef ASSERT
3542 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3543 #endif
3544 FreeHeap(this);
3545 }
3546
3547
3548 #ifdef ASSERT
3549 // Capture the code before relocation so that it can be compared
3550 // against other versions. If the code is captured after relocation
3551 // then relative instructions won't be equivalent.
3552 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3553 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3554 _saved_code_length = length;
3555 memcpy(_saved_code, buffer, length);
3556 }
3557
3558
3559 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3560 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3561
3562 if (other->_saved_code_length != _saved_code_length) {
3563 return false;
3564 }
3565
3566 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3567 }
3568 #endif
3569
3570
3571 /**
3572 * Create a native wrapper for this native method. The wrapper converts the
3573 * Java-compiled calling convention to the native convention, handles
* arguments, and transitions to native. On return from native code we transition
* back to Java, blocking if a safepoint is in progress.
3576 */
3577 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3578 ResourceMark rm;
3579 nmethod* nm = nullptr;
3580
3581 // Check if memory should be freed before allocation
3582 CodeCache::gc_on_allocation();
3583
3584 assert(method->is_native(), "must be native");
3585 assert(method->is_special_native_intrinsic() ||
3586 method->has_native_function(), "must have something valid to call!");
3587
3588 {
3589 // Perform the work while holding the lock, but perform any printing outside the lock
3590 MutexLocker mu(AdapterHandlerLibrary_lock);
3591 // See if somebody beat us to it
3592 if (method->code() != nullptr) {
3593 return;
3594 }
3595
3596 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3597 assert(compile_id > 0, "Must generate native wrapper");
3598
3599
3600 ResourceMark rm;
3601 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3602 if (buf != nullptr) {
3603 CodeBuffer buffer(buf);
3604
3605 if (method->is_continuation_enter_intrinsic()) {
3606 buffer.initialize_stubs_size(192);
3607 }
3608
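// Stack-allocated scratch space for relocation records; the double arrays are
// presumably typed this way to get suitably aligned storage for the relocInfo data.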
3609 struct { double data[20]; } locs_buf;
3610 struct { double data[20]; } stubs_locs_buf;
3611 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3612 #if defined(AARCH64) || defined(PPC64)
3613 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3614 // in the constant pool to ensure ordering between the barrier and oops
3615 // accesses. For native_wrappers we need a constant.
3616 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3617 // static java call that is resolved in the runtime.
3618 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3619 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3620 }
3621 #endif
3622 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3623 MacroAssembler _masm(&buffer);
3624
3625 // Fill in the signature array, for the calling-convention call.
3626 const int total_args_passed = method->size_of_parameters();
3627
3628 BasicType stack_sig_bt[16];
3629 VMRegPair stack_regs[16];
3630 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3631 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3632
3633 int i = 0;
3634 if (!method->is_static()) { // Pass in receiver first
3635 sig_bt[i++] = T_OBJECT;
3636 }
3637 SignatureStream ss(method->signature());
3638 for (; !ss.at_return_type(); ss.next()) {
3639 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3640 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3641 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3642 }
3643 }
3644 assert(i == total_args_passed, "");
3645 BasicType ret_type = ss.type();
3646
3647 // Now get the compiled-Java arguments layout.
3648 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3649
3650 // Generate the compiled-to-native wrapper code
3651 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3652
3653 if (nm != nullptr) {
3654 {
3655 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3656 if (nm->make_in_use()) {
3657 method->set_code(method, nm);
3658 }
3659 }
3660
3661 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3662 if (directive->PrintAssemblyOption) {
3663 nm->print_code();
3664 }
3665 DirectivesStack::release(directive);
3666 }
3667 }
3668 } // Unlock AdapterHandlerLibrary_lock
3669
3670
3671 // Install the generated code.
3672 if (nm != nullptr) {
3673 const char *msg = method->is_static() ? "(static)" : "";
3674 CompileTask::print_ul(nm, msg);
3675 if (PrintCompilation) {
3676 ttyLocker ttyl;
3677 CompileTask::print(tty, nm, msg);
3678 }
3679 nm->post_compiled_method_load_event();
3680 }
3681 }
3682
3683 // -------------------------------------------------------------------------
3684 // Java-Java calling convention
3685 // (what you use when Java calls Java)
3686
3687 //------------------------------name_for_receiver----------------------------------
3688 // For a given signature, return the VMReg for parameter 0.
3689 VMReg SharedRuntime::name_for_receiver() {
3690 VMRegPair regs;
3691 BasicType sig_bt = T_OBJECT;
3692 (void) java_calling_convention(&sig_bt, ®s, 1);
3693 // Return argument 0 register. In the LP64 build pointers
3694 // take 2 registers, but the VM wants only the 'main' name.
3695 return regs.first();
3696 }
3697
3698 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
// This method returns a data structure allocated as a
// ResourceObject, so do not put any ResourceMarks in here.
3701
3702 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3703 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3704 int cnt = 0;
3705 if (has_receiver) {
3706 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3707 }
3708
3709 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3710 BasicType type = ss.type();
3711 sig_bt[cnt++] = type;
3712 if (is_double_word_type(type))
3713 sig_bt[cnt++] = T_VOID;
3714 }
3715
3716 if (has_appendix) {
3717 sig_bt[cnt++] = T_OBJECT;
3718 }
3719
3720 assert(cnt < 256, "grow table size");
3721
3722 int comp_args_on_stack;
3723 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3724
3725 // the calling convention doesn't count out_preserve_stack_slots so
3726 // we must add that in to get "true" stack offsets.
3727
3728 if (comp_args_on_stack) {
3729 for (int i = 0; i < cnt; i++) {
3730 VMReg reg1 = regs[i].first();
3731 if (reg1->is_stack()) {
3732 // Yuck
3733 reg1 = reg1->bias(out_preserve_stack_slots());
3734 }
3735 VMReg reg2 = regs[i].second();
3736 if (reg2->is_stack()) {
3737 // Yuck
3738 reg2 = reg2->bias(out_preserve_stack_slots());
3739 }
3740 regs[i].set_pair(reg2, reg1);
3741 }
3742 }
3743
3744 // results
3745 *arg_size = cnt;
3746 return regs;
3747 }
3748
3749 // OSR Migration Code
3750 //
// This code is used to convert interpreter frames into compiled frames. It is
// called from the very start of a compiled OSR nmethod. A temp array is
3753 // allocated to hold the interesting bits of the interpreter frame. All
3754 // active locks are inflated to allow them to move. The displaced headers and
3755 // active interpreter locals are copied into the temp buffer. Then we return
3756 // back to the compiled code. The compiled code then pops the current
3757 // interpreter frame off the stack and pushes a new compiled frame. Then it
3758 // copies the interpreter locals and displaced headers where it wants.
3759 // Finally it calls back to free the temp buffer.
3760 //
3761 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3762
3763 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3764 assert(current == JavaThread::current(), "pre-condition");
3765 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3766 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3767 // frame. The stack watermark code below ensures that the interpreted frame is processed
3768 // before it gets unwound. This is helpful as the size of the compiled frame could be
3769 // larger than the interpreted frame, which could result in the new frame not being
3770 // processed correctly.
3771 StackWatermarkSet::before_unwind(current);
3772
3773 //
3774 // This code is dependent on the memory layout of the interpreter local
3775 // array and the monitors. On all of our platforms the layout is identical
// so this code is shared. If some platform lays its arrays out
3777 // differently then this code could move to platform specific code or
3778 // the code here could be modified to copy items one at a time using
3779 // frame accessor methods and be platform independent.
3780
3781 frame fr = current->last_frame();
3782 assert(fr.is_interpreted_frame(), "");
3783 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3784
3785 // Figure out how many monitors are active.
3786 int active_monitor_count = 0;
3787 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3788 kptr < fr.interpreter_frame_monitor_begin();
3789 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3790 if (kptr->obj() != nullptr) active_monitor_count++;
3791 }
3792
3793 // QQQ we could place number of active monitors in the array so that compiled code
3794 // could double check it.
3795
3796 Method* moop = fr.interpreter_frame_method();
3797 int max_locals = moop->max_locals();
3798 // Allocate temp buffer, 1 word per local & 2 per active monitor
3799 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3800 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3801
3802 // Copy the locals. Order is preserved so that loading of longs works.
3803 // Since there's no GC I can copy the oops blindly.
3804 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3805 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3806 (HeapWord*)&buf[0],
3807 max_locals);
3808
3809 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3810 int i = max_locals;
3811 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3812 kptr2 < fr.interpreter_frame_monitor_begin();
3813 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3814 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3815 BasicLock *lock = kptr2->lock();
3816 if (UseObjectMonitorTable) {
3817 buf[i] = (intptr_t)lock->object_monitor_cache();
3818 }
3819 #ifdef ASSERT
3820 else {
3821 buf[i] = badDispHeaderOSR;
3822 }
3823 #endif
3824 i++;
3825 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3826 }
3827 }
3828 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3829
3830 RegisterMap map(current,
3831 RegisterMap::UpdateMap::skip,
3832 RegisterMap::ProcessFrames::include,
3833 RegisterMap::WalkContinuation::skip);
3834 frame sender = fr.sender(&map);
3835 if (sender.is_interpreted_frame()) {
3836 current->push_cont_fastpath(sender.sp());
3837 }
3838
3839 return buf;
3840 JRT_END
3841
3842 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3843 FREE_C_HEAP_ARRAY(intptr_t, buf);
3844 JRT_END
3845
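// Returns true if the given CodeBlob contains the i2c entry of any adapter known
// to this library, checking the AOT-archived table first and then the runtime table.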
3846 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3847 bool found = false;
3848 #if INCLUDE_CDS
3849 if (AOTCodeCache::is_using_adapter()) {
3850 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3851 return (found = (b == CodeCache::find_blob(handler->get_i2c_entry())));
3852 };
3853 _aot_adapter_handler_table.iterate(findblob_archived_table);
3854 }
3855 #endif // INCLUDE_CDS
3856 if (!found) {
3857 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3858 return (found = (b == CodeCache::find_blob(a->get_i2c_entry())));
3859 };
3860 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3861 _adapter_handler_table->iterate(findblob_runtime_table);
3862 }
3863 return found;
3864 }
3865
3866 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3867 return handler->fingerprint()->as_basic_args_string();
3868 }
3869
3870 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3871 return handler->id();
3872 }
3873
3874 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3875 bool found = false;
3876 #if INCLUDE_CDS
3877 if (AOTCodeCache::is_using_adapter()) {
3878 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3879 if (b == CodeCache::find_blob(handler->get_i2c_entry())) {
3880 found = true;
3881 st->print("Adapter for signature: ");
3882 handler->print_adapter_on(st);
3883 return true;
3884 } else {
3885 return false; // keep looking
3886 }
3887 };
3888 _aot_adapter_handler_table.iterate(findblob_archived_table);
3889 }
3890 #endif // INCLUDE_CDS
3891 if (!found) {
3892 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3893 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3894 found = true;
3895 st->print("Adapter for signature: ");
3896 a->print_adapter_on(st);
3897 return true;
3898 } else {
3899 return false; // keep looking
3900 }
3901 };
3902 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3903 _adapter_handler_table->iterate(findblob_runtime_table);
3904 }
3905 assert(found, "Should have found handler");
3906 }
3907
3908 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3909 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3910 if (adapter_blob() != nullptr) {
3911 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3912 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3913 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3914 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3915 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3916 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3917 if (get_c2i_no_clinit_check_entry() != nullptr) {
3918 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3919 }
3920 }
3921 st->cr();
3922 }
3923
3924 #ifndef PRODUCT
3925
3926 void AdapterHandlerLibrary::print_statistics() {
3927 print_table_statistics();
3928 }
3929
3930 #endif /* PRODUCT */
3931
3932 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3933 assert(current == JavaThread::current(), "pre-condition");
3934 StackOverflow* overflow_state = current->stack_overflow_state();
3935 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3936 overflow_state->set_reserved_stack_activation(current->stack_base());
3937 JRT_END
3938
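// Walk the stack starting at 'fr' looking for activations of methods annotated
// with @ReservedStackAccess. A warning and a JFR event are issued for each one
// found; the last such frame found is returned (an empty frame if there is none).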
3939 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3940 ResourceMark rm(current);
3941 frame activation;
3942 nmethod* nm = nullptr;
3943 int count = 1;
3944
3945 assert(fr.is_java_frame(), "Must start on Java frame");
3946
3947 RegisterMap map(JavaThread::current(),
3948 RegisterMap::UpdateMap::skip,
3949 RegisterMap::ProcessFrames::skip,
3950 RegisterMap::WalkContinuation::skip); // don't walk continuations
3951 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3952 if (!fr.is_java_frame()) {
3953 continue;
3954 }
3955
3956 Method* method = nullptr;
3957 bool found = false;
3958 if (fr.is_interpreted_frame()) {
3959 method = fr.interpreter_frame_method();
3960 if (method != nullptr && method->has_reserved_stack_access()) {
3961 found = true;
3962 }
3963 } else {
3964 CodeBlob* cb = fr.cb();
3965 if (cb != nullptr && cb->is_nmethod()) {
3966 nm = cb->as_nmethod();
3967 method = nm->method();
3968 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
3969 method = sd->method();
3970 if (method != nullptr && method->has_reserved_stack_access()) {
3971 found = true;
3972 }
3973 }
3974 }
3975 }
3976 if (found) {
3977 activation = fr;
3978 warning("Potentially dangerous stack overflow in "
3979 "ReservedStackAccess annotated method %s [%d]",
3980 method->name_and_sig_as_C_string(), count++);
3981 EventReservedStackActivation event;
3982 if (event.should_commit()) {
3983 event.set_method(method);
3984 event.commit();
3985 }
3986 }
3987 }
3988 return activation;
3989 }
3990
3991 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3992 // After any safepoint, just before going back to compiled code,
3993 // we inform the GC that we will be doing initializing writes to
3994 // this object in the future without emitting card-marks, so
3995 // GC may take any compensating steps.
3996
3997 oop new_obj = current->vm_result_oop();
3998 if (new_obj == nullptr) return;
3999
4000 BarrierSet *bs = BarrierSet::barrier_set();
4001 bs->on_slowpath_allocation_exit(current, new_obj);
4002 }
4003
// We are at a compiled-code-to-interpreter call. We need backing
4005 // buffers for all inline type arguments. Allocate an object array to
4006 // hold them (convenient because once we're done with it we don't have
4007 // to worry about freeing it).
4008 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
4009 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4010 ResourceMark rm;
4011
4012 int nb_slots = 0;
4013 InstanceKlass* holder = callee->method_holder();
4014 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4015 if (allocate_receiver) {
4016 nb_slots++;
4017 }
4018 int arg_num = callee->is_static() ? 0 : 1;
4019 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4020 BasicType bt = ss.type();
4021 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4022 nb_slots++;
4023 }
4024 if (bt != T_VOID) {
4025 arg_num++;
4026 }
4027 }
4028 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4029 objArrayHandle array(THREAD, array_oop);
4030 arg_num = callee->is_static() ? 0 : 1;
4031 int i = 0;
4032 if (allocate_receiver) {
4033 InlineKlass* vk = InlineKlass::cast(holder);
4034 oop res = vk->allocate_instance(CHECK_NULL);
4035 array->obj_at_put(i++, res);
4036 }
4037 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4038 BasicType bt = ss.type();
4039 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4040 InlineKlass* vk = ss.as_inline_klass(holder);
4041 assert(vk != nullptr, "Unexpected klass");
4042 oop res = vk->allocate_instance(CHECK_NULL);
4043 array->obj_at_put(i++, res);
4044 }
4045 if (bt != T_VOID) {
4046 arg_num++;
4047 }
4048 }
4049 return array();
4050 }
4051
4052 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4053 methodHandle callee(current, callee_method);
4054 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
4055 current->set_vm_result_oop(array);
4056 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4057 JRT_END
4058
4059 // We're returning from an interpreted method: load each field into a
4060 // register following the calling convention
4061 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4062 {
4063 assert(res->klass()->is_inline_klass(), "only inline types here");
4064 ResourceMark rm;
4065 RegisterMap reg_map(current,
4066 RegisterMap::UpdateMap::include,
4067 RegisterMap::ProcessFrames::include,
4068 RegisterMap::WalkContinuation::skip);
4069 frame stubFrame = current->last_frame();
4070 frame callerFrame = stubFrame.sender(®_map);
4071 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4072
4073 InlineKlass* vk = InlineKlass::cast(res->klass());
4074
4075 const Array<SigEntry>* sig_vk = vk->extended_sig();
4076 const Array<VMRegPair>* regs = vk->return_regs();
4077
4078 if (regs == nullptr) {
4079 // The fields of the inline klass don't fit in registers, bail out
4080 return;
4081 }
4082
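// regs->at(0) holds the register carrying the inline type oop itself (verified by
// the assert below), so the scalarized field values start at index 1.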
4083 int j = 1;
4084 for (int i = 0; i < sig_vk->length(); i++) {
4085 BasicType bt = sig_vk->at(i)._bt;
4086 if (bt == T_METADATA) {
4087 continue;
4088 }
4089 if (bt == T_VOID) {
4090 if (sig_vk->at(i-1)._bt == T_LONG ||
4091 sig_vk->at(i-1)._bt == T_DOUBLE) {
4092 j++;
4093 }
4094 continue;
4095 }
4096 int off = sig_vk->at(i)._offset;
4097 assert(off > 0, "offset in object should be positive");
4098 VMRegPair pair = regs->at(j);
4099 address loc = reg_map.location(pair.first(), nullptr);
4100 switch(bt) {
4101 case T_BOOLEAN:
4102 *(jboolean*)loc = res->bool_field(off);
4103 break;
4104 case T_CHAR:
4105 *(jchar*)loc = res->char_field(off);
4106 break;
4107 case T_BYTE:
4108 *(jbyte*)loc = res->byte_field(off);
4109 break;
4110 case T_SHORT:
4111 *(jshort*)loc = res->short_field(off);
4112 break;
4113 case T_INT: {
4114 *(jint*)loc = res->int_field(off);
4115 break;
4116 }
4117 case T_LONG:
4118 #ifdef _LP64
4119 *(intptr_t*)loc = res->long_field(off);
4120 #else
4121 Unimplemented();
4122 #endif
4123 break;
4124 case T_OBJECT:
4125 case T_ARRAY: {
4126 *(oop*)loc = res->obj_field(off);
4127 break;
4128 }
4129 case T_FLOAT:
4130 *(jfloat*)loc = res->float_field(off);
4131 break;
4132 case T_DOUBLE:
4133 *(jdouble*)loc = res->double_field(off);
4134 break;
4135 default:
4136 ShouldNotReachHere();
4137 }
4138 j++;
4139 }
4140 assert(j == regs->length(), "missed a field?");
4141
4142 #ifdef ASSERT
4143 VMRegPair pair = regs->at(0);
4144 address loc = reg_map.location(pair.first(), nullptr);
4145 assert(*(oopDesc**)loc == res, "overwritten object");
4146 #endif
4147
4148 current->set_vm_result_oop(res);
4149 }
4150 JRT_END
4151
// We've returned to an interpreted method; the interpreter needs a
// reference to an inline type instance. Allocate it and initialize it
// from the field values in registers.
4155 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4156 {
4157 ResourceMark rm;
4158 RegisterMap reg_map(current,
4159 RegisterMap::UpdateMap::include,
4160 RegisterMap::ProcessFrames::include,
4161 RegisterMap::WalkContinuation::skip);
4162 frame stubFrame = current->last_frame();
4163 frame callerFrame = stubFrame.sender(®_map);
4164
4165 #ifdef ASSERT
4166 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4167 #endif
4168
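// The low bit of 'res' encodes how the value was returned: if it is clear, 'res'
// is a plain oop (or a pointer into the buffer area) and the fields were not
// returned in registers; if it is set, 'res' is the (tagged) InlineKlass pointer
// and the fields must be gathered from registers.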
4169 if (!is_set_nth_bit(res, 0)) {
4170 // We're not returning with inline type fields in registers (the
4171 // calling convention didn't allow it for this inline klass)
4172 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4173 current->set_vm_result_oop((oopDesc*)res);
4174 assert(verif_vk == nullptr, "broken calling convention");
4175 return;
4176 }
4177
4178 clear_nth_bit(res, 0);
4179 InlineKlass* vk = (InlineKlass*)res;
4180 assert(verif_vk == vk, "broken calling convention");
4181 assert(Metaspace::contains((void*)res), "should be klass");
4182
4183 // Allocate handles for every oop field so they are safe in case of
4184 // a safepoint when allocating
4185 GrowableArray<Handle> handles;
4186 vk->save_oop_fields(reg_map, handles);
4187
4188 // It's unsafe to safepoint until we are here
4189 JRT_BLOCK;
4190 {
4191 JavaThread* THREAD = current;
4192 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4193 current->set_vm_result_oop(vt);
4194 }
4195 JRT_BLOCK_END;
4196 }
4197 JRT_END