1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotCompressedPointers.hpp"
26 #include "cds/archiveBuilder.hpp"
27 #include "cds/archiveUtils.inline.hpp"
28 #include "classfile/classLoader.hpp"
29 #include "classfile/compactHashtable.hpp"
30 #include "classfile/javaClasses.inline.hpp"
31 #include "classfile/stringTable.hpp"
32 #include "classfile/vmClasses.hpp"
33 #include "classfile/vmSymbols.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeCache.hpp"
36 #include "code/compiledIC.hpp"
37 #include "code/nmethod.inline.hpp"
38 #include "code/scopeDesc.hpp"
39 #include "code/vtableStubs.hpp"
40 #include "compiler/abstractCompiler.hpp"
41 #include "compiler/compileBroker.hpp"
42 #include "compiler/disassembler.hpp"
43 #include "gc/shared/barrierSet.hpp"
44 #include "gc/shared/collectedHeap.hpp"
45 #include "interpreter/interpreter.hpp"
46 #include "interpreter/interpreterRuntime.hpp"
47 #include "jfr/jfrEvents.hpp"
48 #include "jvm.h"
49 #include "logging/log.hpp"
50 #include "memory/oopFactory.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "metaprogramming/primitiveConversions.hpp"
54 #include "oops/access.hpp"
55 #include "oops/fieldStreams.inline.hpp"
56 #include "oops/inlineKlass.inline.hpp"
57 #include "oops/klass.hpp"
58 #include "oops/method.inline.hpp"
59 #include "oops/objArrayKlass.hpp"
60 #include "oops/objArrayOop.inline.hpp"
61 #include "oops/oop.inline.hpp"
62 #include "prims/forte.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "prims/jvmtiThreadState.hpp"
65 #include "prims/methodHandles.hpp"
66 #include "prims/nativeLookup.hpp"
67 #include "runtime/arguments.hpp"
68 #include "runtime/atomicAccess.hpp"
69 #include "runtime/basicLock.inline.hpp"
70 #include "runtime/frame.inline.hpp"
71 #include "runtime/handles.inline.hpp"
72 #include "runtime/init.hpp"
73 #include "runtime/interfaceSupport.inline.hpp"
74 #include "runtime/java.hpp"
75 #include "runtime/javaCalls.hpp"
76 #include "runtime/jniHandles.inline.hpp"
77 #include "runtime/osThread.hpp"
78 #include "runtime/perfData.hpp"
79 #include "runtime/sharedRuntime.hpp"
80 #include "runtime/signature.hpp"
81 #include "runtime/stackWatermarkSet.hpp"
82 #include "runtime/stubRoutines.hpp"
83 #include "runtime/synchronizer.hpp"
84 #include "runtime/timerTrace.hpp"
85 #include "runtime/vframe.inline.hpp"
86 #include "runtime/vframeArray.hpp"
87 #include "runtime/vm_version.hpp"
88 #include "utilities/copy.hpp"
89 #include "utilities/dtrace.hpp"
90 #include "utilities/events.hpp"
91 #include "utilities/exceptions.hpp"
92 #include "utilities/globalDefinitions.hpp"
93 #include "utilities/hashTable.hpp"
94 #include "utilities/macros.hpp"
95 #include "utilities/xmlstream.hpp"
96 #ifdef COMPILER1
97 #include "c1/c1_Runtime1.hpp"
98 #endif
99 #ifdef COMPILER2
100 #include "opto/runtime.hpp"
101 #endif
102 #if INCLUDE_JFR
103 #include "jfr/jfr.inline.hpp"
104 #endif
105
106 // Shared runtime stub routines reside in their own unique blob with a
107 // single entry point
108
109
110 #define SHARED_STUB_FIELD_DEFINE(name, type) \
111 type* SharedRuntime::BLOB_FIELD_NAME(name);
112 SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
113 #undef SHARED_STUB_FIELD_DEFINE
114
115 nmethod* SharedRuntime::_cont_doYield_stub;
116
117 #if 0
118 // TODO tweak global stub name generation to match this
119 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
120 const char *SharedRuntime::_stub_names[] = {
121 SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
122 };
123 #endif
124
125 //----------------------------generate_stubs-----------------------------------
126 void SharedRuntime::generate_initial_stubs() {
127 // Build this early so it's available for the interpreter.
128 _throw_StackOverflowError_blob =
129 generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
130 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
131 }
132
133 void SharedRuntime::generate_stubs() {
134 _wrong_method_blob =
135 generate_resolve_blob(StubId::shared_wrong_method_id,
136 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
137 _wrong_method_abstract_blob =
138 generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
139 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
140 _ic_miss_blob =
141 generate_resolve_blob(StubId::shared_ic_miss_id,
142 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
143 _resolve_opt_virtual_call_blob =
144 generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
145 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
146 _resolve_virtual_call_blob =
147 generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
148 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
149 _resolve_static_call_blob =
150 generate_resolve_blob(StubId::shared_resolve_static_call_id,
151 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
152
153 _throw_delayed_StackOverflowError_blob =
154 generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
155 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
156
157 _throw_AbstractMethodError_blob =
158 generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
159 CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
160
161 _throw_IncompatibleClassChangeError_blob =
162 generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
163 CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
164
165 _throw_NullPointerException_at_call_blob =
166 generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
167 CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
168
169 #if COMPILER2_OR_JVMCI
170 // Vectors are generated only by C2 and JVMCI.
171 bool support_wide = is_wide_vector(MaxVectorSize);
172 if (support_wide) {
173 _polling_page_vectors_safepoint_handler_blob =
174 generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
175 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
176 }
177 #endif // COMPILER2_OR_JVMCI
178 _polling_page_safepoint_handler_blob =
179 generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
180 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
181 _polling_page_return_handler_blob =
182 generate_handler_blob(StubId::shared_polling_page_return_handler_id,
183 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
184
185 generate_deopt_blob();
186
187 #if INCLUDE_CDS
188 // disallow any further generation of runtime stubs
189 AOTCodeCache::set_shared_stubs_complete();
190 #endif // INCLUDE_CDS
191 }
192
193 void SharedRuntime::init_adapter_library() {
194 AdapterHandlerLibrary::initialize();
195 }
196
197 #if INCLUDE_JFR
198 //------------------------------generate jfr runtime stubs ------
199 void SharedRuntime::generate_jfr_stubs() {
200 ResourceMark rm;
201 const char* timer_msg = "SharedRuntime generate_jfr_stubs";
202 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
203
204 _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
205 _jfr_return_lease_blob = generate_jfr_return_lease();
206 }
207
208 #endif // INCLUDE_JFR
209
210 #include <math.h>
211
212 // Implementation of SharedRuntime
213
214 #ifndef PRODUCT
215 // For statistics
216 uint SharedRuntime::_ic_miss_ctr = 0;
217 uint SharedRuntime::_wrong_method_ctr = 0;
218 uint SharedRuntime::_resolve_static_ctr = 0;
219 uint SharedRuntime::_resolve_virtual_ctr = 0;
220 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
221 uint SharedRuntime::_implicit_null_throws = 0;
222 uint SharedRuntime::_implicit_div0_throws = 0;
223
224 int64_t SharedRuntime::_nof_normal_calls = 0;
225 int64_t SharedRuntime::_nof_inlined_calls = 0;
226 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
227 int64_t SharedRuntime::_nof_static_calls = 0;
228 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
229 int64_t SharedRuntime::_nof_interface_calls = 0;
230 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
231
232 uint SharedRuntime::_new_instance_ctr=0;
233 uint SharedRuntime::_new_array_ctr=0;
234 uint SharedRuntime::_multi2_ctr=0;
235 uint SharedRuntime::_multi3_ctr=0;
236 uint SharedRuntime::_multi4_ctr=0;
237 uint SharedRuntime::_multi5_ctr=0;
238 uint SharedRuntime::_mon_enter_stub_ctr=0;
239 uint SharedRuntime::_mon_exit_stub_ctr=0;
240 uint SharedRuntime::_mon_enter_ctr=0;
241 uint SharedRuntime::_mon_exit_ctr=0;
242 uint SharedRuntime::_partial_subtype_ctr=0;
243 uint SharedRuntime::_jbyte_array_copy_ctr=0;
244 uint SharedRuntime::_jshort_array_copy_ctr=0;
245 uint SharedRuntime::_jint_array_copy_ctr=0;
246 uint SharedRuntime::_jlong_array_copy_ctr=0;
247 uint SharedRuntime::_oop_array_copy_ctr=0;
248 uint SharedRuntime::_checkcast_array_copy_ctr=0;
249 uint SharedRuntime::_unsafe_array_copy_ctr=0;
250 uint SharedRuntime::_generic_array_copy_ctr=0;
251 uint SharedRuntime::_slow_array_copy_ctr=0;
252 uint SharedRuntime::_find_handler_ctr=0;
253 uint SharedRuntime::_rethrow_ctr=0;
254 uint SharedRuntime::_unsafe_set_memory_ctr=0;
255
256 int SharedRuntime::_ICmiss_index = 0;
257 int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
258 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
259
260
261 void SharedRuntime::trace_ic_miss(address at) {
262 for (int i = 0; i < _ICmiss_index; i++) {
263 if (_ICmiss_at[i] == at) {
264 _ICmiss_count[i]++;
265 return;
266 }
267 }
268 int index = _ICmiss_index++;
269 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
270 _ICmiss_at[index] = at;
271 _ICmiss_count[index] = 1;
272 }
273
274 void SharedRuntime::print_ic_miss_histogram() {
275 if (ICMissHistogram) {
276 tty->print_cr("IC Miss Histogram:");
277 int tot_misses = 0;
278 for (int i = 0; i < _ICmiss_index; i++) {
279 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
280 tot_misses += _ICmiss_count[i];
281 }
282 tty->print_cr("Total IC misses: %7d", tot_misses);
283 }
284 }
285
286 #ifdef COMPILER2
287 // Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
288 void SharedRuntime::debug_print_value(jboolean x) {
289 tty->print_cr("boolean %d", x);
290 }
291
292 void SharedRuntime::debug_print_value(jbyte x) {
293 tty->print_cr("byte %d", x);
294 }
295
296 void SharedRuntime::debug_print_value(jshort x) {
297 tty->print_cr("short %d", x);
298 }
299
300 void SharedRuntime::debug_print_value(jchar x) {
301 tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
302 }
303
304 void SharedRuntime::debug_print_value(jint x) {
305 tty->print_cr("int %d", x);
306 }
307
308 void SharedRuntime::debug_print_value(jlong x) {
309 tty->print_cr("long " JLONG_FORMAT, x);
310 }
311
312 void SharedRuntime::debug_print_value(jfloat x) {
313 tty->print_cr("float %f", x);
314 }
315
316 void SharedRuntime::debug_print_value(jdouble x) {
317 tty->print_cr("double %lf", x);
318 }
319
320 void SharedRuntime::debug_print_value(oopDesc* x) {
321 x->print();
322 }
323 #endif // COMPILER2
324
325 #endif // PRODUCT
326
327
328 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
329 return x * y;
330 JRT_END
331
332
333 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
334 if (x == min_jlong && y == CONST64(-1)) {
335 return x;
336 } else {
337 return x / y;
338 }
339 JRT_END
340
341
342 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
343 if (x == min_jlong && y == CONST64(-1)) {
344 return 0;
345 } else {
346 return x % y;
347 }
348 JRT_END
349
350
351 #ifdef _WIN64
352 const juint float_sign_mask = 0x7FFFFFFF;
353 const juint float_infinity = 0x7F800000;
354 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
355 const julong double_infinity = CONST64(0x7FF0000000000000);
356 #endif
357
358 #if !defined(X86)
359 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
360 #ifdef _WIN64
361 // 64-bit Windows on amd64 returns the wrong values for
362 // infinity operands.
363 juint xbits = PrimitiveConversions::cast<juint>(x);
364 juint ybits = PrimitiveConversions::cast<juint>(y);
365 // x Mod Infinity == x unless x is infinity
366 if (((xbits & float_sign_mask) != float_infinity) &&
367 ((ybits & float_sign_mask) == float_infinity) ) {
368 return x;
369 }
370 return ((jfloat)fmod_winx64((double)x, (double)y));
371 #else
372 return ((jfloat)fmod((double)x,(double)y));
373 #endif
374 JRT_END
375
376 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
377 #ifdef _WIN64
378 julong xbits = PrimitiveConversions::cast<julong>(x);
379 julong ybits = PrimitiveConversions::cast<julong>(y);
380 // x Mod Infinity == x unless x is infinity
381 if (((xbits & double_sign_mask) != double_infinity) &&
382 ((ybits & double_sign_mask) == double_infinity) ) {
383 return x;
384 }
385 return ((jdouble)fmod_winx64((double)x, (double)y));
386 #else
387 return ((jdouble)fmod((double)x,(double)y));
388 #endif
389 JRT_END
390 #endif // !X86
391
392 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
393 return (jfloat)x;
394 JRT_END
395
396 #ifdef __SOFTFP__
397 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
398 return x + y;
399 JRT_END
400
401 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
402 return x - y;
403 JRT_END
404
405 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
406 return x * y;
407 JRT_END
408
409 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
410 return x / y;
411 JRT_END
412
413 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
414 return x + y;
415 JRT_END
416
417 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
418 return x - y;
419 JRT_END
420
421 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
422 return x * y;
423 JRT_END
424
425 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
426 return x / y;
427 JRT_END
428
429 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
430 return (jdouble)x;
431 JRT_END
432
433 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
434 return (jdouble)x;
435 JRT_END
436
437 JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
438 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
439 JRT_END
440
441 JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
442 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
443 JRT_END
444
445 JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
446 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
447 JRT_END
448
449 JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
450 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
451 JRT_END
452
453 // Functions to return the opposite of the aeabi functions for nan.
454 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
455 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
456 JRT_END
457
458 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
459 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
460 JRT_END
461
462 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
463 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
464 JRT_END
465
466 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
467 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
468 JRT_END
469
470 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
471 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
472 JRT_END
473
474 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
475 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
476 JRT_END
477
478 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
479 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
480 JRT_END
481
482 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
483 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
484 JRT_END
485
486 // Intrinsics make gcc generate code for these.
487 float SharedRuntime::fneg(float f) {
488 return -f;
489 }
490
491 double SharedRuntime::dneg(double f) {
492 return -f;
493 }
494
495 #endif // __SOFTFP__
496
497 #if defined(__SOFTFP__) || defined(E500V2)
498 // Intrinsics make gcc generate code for these.
499 double SharedRuntime::dabs(double f) {
500 return (f <= (double)0.0) ? (double)0.0 - f : f;
501 }
502
503 #endif
504
505 #if defined(__SOFTFP__)
506 double SharedRuntime::dsqrt(double f) {
507 return sqrt(f);
508 }
509 #endif
510
511 JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
512 if (g_isnan(x))
513 return 0;
514 if (x >= (jfloat) max_jint)
515 return max_jint;
516 if (x <= (jfloat) min_jint)
517 return min_jint;
518 return (jint) x;
519 JRT_END
520
521
522 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
523 if (g_isnan(x))
524 return 0;
525 if (x >= (jfloat) max_jlong)
526 return max_jlong;
527 if (x <= (jfloat) min_jlong)
528 return min_jlong;
529 return (jlong) x;
530 JRT_END
531
532
533 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
534 if (g_isnan(x))
535 return 0;
536 if (x >= (jdouble) max_jint)
537 return max_jint;
538 if (x <= (jdouble) min_jint)
539 return min_jint;
540 return (jint) x;
541 JRT_END
542
543
544 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
545 if (g_isnan(x))
546 return 0;
547 if (x >= (jdouble) max_jlong)
548 return max_jlong;
549 if (x <= (jdouble) min_jlong)
550 return min_jlong;
551 return (jlong) x;
552 JRT_END
553
554
555 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
556 return (jfloat)x;
557 JRT_END
558
559
560 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
561 return (jfloat)x;
562 JRT_END
563
564
565 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
566 return (jdouble)x;
567 JRT_END
568
569
570 // Exception handling across interpreter/compiler boundaries
571 //
572 // exception_handler_for_return_address(...) returns the continuation address.
573 // The continuation address is the entry point of the exception handler of the
574 // previous frame depending on the return address.
575
576 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
577 // Note: This is called when we have unwound the frame of the callee that did
578 // throw an exception. So far, no check has been performed by the StackWatermarkSet.
579 // Notably, the stack is not walkable at this point, and hence the check must
580 // be deferred until later. Specifically, any of the handlers returned here in
581 // this function, will get dispatched to, and call deferred checks to
582 // StackWatermarkSet::after_unwind at a point where the stack is walkable.
583 assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
584 assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
585
586 #if INCLUDE_JVMCI
587 // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
588 // and other exception handler continuations do not read it
589 current->set_exception_pc(nullptr);
590 #endif // INCLUDE_JVMCI
591
592 if (Continuation::is_return_barrier_entry(return_address)) {
593 return StubRoutines::cont_returnBarrierExc();
594 }
595
596 // The fastest case first
597 CodeBlob* blob = CodeCache::find_blob(return_address);
598 nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
599 if (nm != nullptr) {
600 // native nmethods don't have exception handlers
601 assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
602 assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
603 if (nm->is_deopt_pc(return_address)) {
604 // If we come here because of a stack overflow, the stack may be
605 // unguarded. Reguard the stack otherwise if we return to the
606 // deopt blob and the stack bang causes a stack overflow we
607 // crash.
608 StackOverflow* overflow_state = current->stack_overflow_state();
609 bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
610 if (overflow_state->reserved_stack_activation() != current->stack_base()) {
611 overflow_state->set_reserved_stack_activation(current->stack_base());
612 }
613 assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
614 // The deferred StackWatermarkSet::after_unwind check will be performed in
615 // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
616 return SharedRuntime::deopt_blob()->unpack_with_exception();
617 } else {
618 // The deferred StackWatermarkSet::after_unwind check will be performed in
619 // * OptoRuntime::handle_exception_C_helper for C2 code
620 // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
621 #ifdef COMPILER2
622 if (nm->compiler_type() == compiler_c2) {
623 return OptoRuntime::exception_blob()->entry_point();
624 }
625 #endif // COMPILER2
626 return nm->exception_begin();
627 }
628 }
629
630 // Entry code
631 if (StubRoutines::returns_to_call_stub(return_address)) {
632 // The deferred StackWatermarkSet::after_unwind check will be performed in
633 // JavaCallWrapper::~JavaCallWrapper
634 assert (StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
635 return StubRoutines::catch_exception_entry();
636 }
637 if (blob != nullptr && blob->is_upcall_stub()) {
638 return StubRoutines::upcall_stub_exception_handler();
639 }
640 // Interpreted code
641 if (Interpreter::contains(return_address)) {
642 // The deferred StackWatermarkSet::after_unwind check will be performed in
643 // InterpreterRuntime::exception_handler_for_exception
644 return Interpreter::rethrow_exception_entry();
645 }
646
647 guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
648 guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
649
650 #ifndef PRODUCT
651 { ResourceMark rm;
652 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
653 os::print_location(tty, (intptr_t)return_address);
654 tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
655 tty->print_cr("b) other problem");
656 }
657 #endif // PRODUCT
658 ShouldNotReachHere();
659 return nullptr;
660 }
661
662
663 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
664 return raw_exception_handler_for_return_address(current, return_address);
665 JRT_END
666
667
668 address SharedRuntime::get_poll_stub(address pc) {
669 address stub;
670 // Look up the code blob
671 CodeBlob *cb = CodeCache::find_blob(pc);
672
673 // Should be an nmethod
674 guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
675
676 // Look up the relocation information
677 assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
678 "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
679
680 #ifdef ASSERT
681 if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
682 tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
683 Disassembler::decode(cb);
684 fatal("Only polling locations are used for safepoint");
685 }
686 #endif
687
688 bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
689 bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
690 if (at_poll_return) {
691 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
692 "polling page return stub not created yet");
693 stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
694 } else if (has_wide_vectors) {
695 assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
696 "polling page vectors safepoint stub not created yet");
697 stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
698 } else {
699 assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
700 "polling page safepoint stub not created yet");
701 stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
702 }
703 log_trace(safepoint)("Polling page exception: thread = " INTPTR_FORMAT " [%d], pc = "
704 INTPTR_FORMAT " (%s), stub = " INTPTR_FORMAT,
705 p2i(Thread::current()),
706 Thread::current()->osthread()->thread_id(),
707 p2i(pc),
708 at_poll_return ? "return" : "loop",
709 p2i(stub));
710 return stub;
711 }
712
713 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
714 if (JvmtiExport::can_post_on_exceptions()) {
715 vframeStream vfst(current, true);
716 methodHandle method = methodHandle(current, vfst.method());
717 address bcp = method()->bcp_from(vfst.bci());
718 JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
719 }
720
721 #if INCLUDE_JVMCI
722 if (EnableJVMCI) {
723 vframeStream vfst(current, true);
724 methodHandle method = methodHandle(current, vfst.method());
725 int bci = vfst.bci();
726 MethodData* trap_mdo = method->method_data();
727 if (trap_mdo != nullptr) {
728 // Set exception_seen if the exceptional bytecode is an invoke
729 Bytecode_invoke call = Bytecode_invoke_check(method, bci);
730 if (call.is_valid()) {
731 ResourceMark rm(current);
732
733 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
734 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
735
736 ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
737 if (pdata != nullptr && pdata->is_BitData()) {
738 BitData* bit_data = (BitData*) pdata;
739 bit_data->set_exception_seen();
740 }
741 }
742 }
743 }
744 #endif
745
746 Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
747 }
748
749 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
750 Handle h_exception = Exceptions::new_exception(current, name, message);
751 throw_and_post_jvmti_exception(current, h_exception);
752 }
753
754 // The interpreter code to call this tracing function is only
755 // called/generated when UL is on for redefine, class and has the right level
756 // and tags. Since obsolete methods are never compiled, we don't have
757 // to modify the compilers to generate calls to this function.
758 //
759 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
760 JavaThread* thread, Method* method))
761 if (method->is_obsolete()) {
762 // We are calling an obsolete method, but this is not necessarily
763 // an error. Our method could have been redefined just after we
764 // fetched the Method* from the constant pool.
765 ResourceMark rm;
766 log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
767 }
768 return 0;
769 JRT_END
770
771 // ret_pc points into caller; we are returning caller's exception handler
772 // for given exception
773 // Note that the implementation of this method assumes it's only called when an exception has actually occured
774 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
775 bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
776 assert(nm != nullptr, "must exist");
777 ResourceMark rm;
778
779 #if INCLUDE_JVMCI
780 if (nm->is_compiled_by_jvmci()) {
781 // lookup exception handler for this pc
782 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
783 ExceptionHandlerTable table(nm);
784 HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
785 if (t != nullptr) {
786 return nm->code_begin() + t->pco();
787 } else {
788 bool make_not_entrant = true;
789 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
790 }
791 }
792 #endif // INCLUDE_JVMCI
793
794 ScopeDesc* sd = nm->scope_desc_at(ret_pc);
795 // determine handler bci, if any
796 EXCEPTION_MARK;
797
798 Handle orig_exception(THREAD, exception());
799
800 int handler_bci = -1;
801 int scope_depth = 0;
802 if (!force_unwind) {
803 int bci = sd->bci();
804 bool recursive_exception = false;
805 do {
806 bool skip_scope_increment = false;
807 // exception handler lookup
808 Klass* ek = exception->klass();
809 methodHandle mh(THREAD, sd->method());
810 handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
811 if (HAS_PENDING_EXCEPTION) {
812 recursive_exception = true;
813 // We threw an exception while trying to find the exception handler.
814 // Transfer the new exception to the exception handle which will
815 // be set into thread local storage, and do another lookup for an
816 // exception handler for this exception, this time starting at the
817 // BCI of the exception handler which caused the exception to be
818 // thrown (bugs 4307310 and 4546590). Set "exception" reference
819 // argument to ensure that the correct exception is thrown (4870175).
820 recursive_exception_occurred = true;
821 exception.replace(PENDING_EXCEPTION);
822 CLEAR_PENDING_EXCEPTION;
823 if (handler_bci >= 0) {
824 bci = handler_bci;
825 handler_bci = -1;
826 skip_scope_increment = true;
827 }
828 }
829 else {
830 recursive_exception = false;
831 }
832 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
833 sd = sd->sender();
834 if (sd != nullptr) {
835 bci = sd->bci();
836 }
837 ++scope_depth;
838 }
839 } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
840 }
841
842 // found handling method => lookup exception handler
843 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
844
845 ExceptionHandlerTable table(nm);
846 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
847
848 // If the compiler did not anticipate a recursive exception, resulting in an exception
849 // thrown from the catch bci, then the compiled exception handler might be missing.
850 // This is rare. Just deoptimize and let the interpreter rethrow the original
851 // exception at the original bci.
852 if (t == nullptr && recursive_exception_occurred) {
853 exception.replace(orig_exception()); // restore original exception
854 bool make_not_entrant = false;
855 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
856 }
857
858 if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
859 // Allow abbreviated catch tables. The idea is to allow a method
860 // to materialize its exceptions without committing to the exact
861 // routing of exceptions. In particular this is needed for adding
862 // a synthetic handler to unlock monitors when inlining
863 // synchronized methods since the unlock path isn't represented in
864 // the bytecodes.
865 t = table.entry_for(catch_pco, -1, 0);
866 }
867
868 #ifdef COMPILER1
869 if (t == nullptr && nm->is_compiled_by_c1()) {
870 assert(nm->unwind_handler_begin() != nullptr, "");
871 return nm->unwind_handler_begin();
872 }
873 #endif
874
875 if (t == nullptr) {
876 ttyLocker ttyl;
877 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
878 tty->print_cr(" Exception:");
879 exception->print();
880 tty->cr();
881 tty->print_cr(" Compiled exception table :");
882 table.print();
883 nm->print();
884 nm->print_code();
885 guarantee(false, "missing exception handler");
886 return nullptr;
887 }
888
889 if (handler_bci != -1) { // did we find a handler in this method?
890 sd->method()->set_exception_handler_entered(handler_bci); // profile
891 }
892 return nm->code_begin() + t->pco();
893 }
894
895 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
896 // These errors occur only at call sites
897 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
898 JRT_END
899
900 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
901 // These errors occur only at call sites
902 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
903 JRT_END
904
905 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
906 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
907 JRT_END
908
909 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
910 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
911 JRT_END
912
913 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
914 // This entry point is effectively only used for NullPointerExceptions which occur at inline
915 // cache sites (when the callee activation is not yet set up) so we are at a call site
916 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
917 JRT_END
918
919 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
920 throw_StackOverflowError_common(current, false);
921 JRT_END
922
923 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
924 throw_StackOverflowError_common(current, true);
925 JRT_END
926
927 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
928 // We avoid using the normal exception construction in this case because
929 // it performs an upcall to Java, and we're already out of stack space.
930 JavaThread* THREAD = current; // For exception macros.
931 InstanceKlass* k = vmClasses::StackOverflowError_klass();
932 oop exception_oop = k->allocate_instance(CHECK);
933 if (delayed) {
934 java_lang_Throwable::set_message(exception_oop,
935 Universe::delayed_stack_overflow_error_message());
936 }
937 Handle exception (current, exception_oop);
938 if (StackTraceInThrowable) {
939 java_lang_Throwable::fill_in_stack_trace(exception);
940 }
941 // Remove the ScopedValue bindings in case we got a
942 // StackOverflowError while we were trying to remove ScopedValue
943 // bindings.
944 current->clear_scopedValueBindings();
945 // Increment counter for hs_err file reporting
946 Exceptions::increment_stack_overflow_errors();
947 throw_and_post_jvmti_exception(current, exception);
948 }
949
950 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
951 address pc,
952 ImplicitExceptionKind exception_kind)
953 {
954 address target_pc = nullptr;
955
956 if (Interpreter::contains(pc)) {
957 switch (exception_kind) {
958 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
959 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
960 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
961 default: ShouldNotReachHere();
962 }
963 } else {
964 switch (exception_kind) {
965 case STACK_OVERFLOW: {
966 // Stack overflow only occurs upon frame setup; the callee is
967 // going to be unwound. Dispatch to a shared runtime stub
968 // which will cause the StackOverflowError to be fabricated
969 // and processed.
970 // Stack overflow should never occur during deoptimization:
971 // the compiled method bangs the stack by as much as the
972 // interpreter would need in case of a deoptimization. The
973 // deoptimization blob and uncommon trap blob bang the stack
974 // in a debug VM to verify the correctness of the compiled
975 // method stack banging.
976 assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
977 Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
978 return SharedRuntime::throw_StackOverflowError_entry();
979 }
980
981 case IMPLICIT_NULL: {
982 if (VtableStubs::contains(pc)) {
983 // We haven't yet entered the callee frame. Fabricate an
984 // exception and begin dispatching it in the caller. Since
985 // the caller was at a call site, it's safe to destroy all
986 // caller-saved registers, as these entry points do.
987 VtableStub* vt_stub = VtableStubs::stub_containing(pc);
988
989 // If vt_stub is null, then return null to signal handler to report the SEGV error.
990 if (vt_stub == nullptr) return nullptr;
991
992 if (vt_stub->is_abstract_method_error(pc)) {
993 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
994 Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
995 // Instead of throwing the abstract method error here directly, we re-resolve
996 // and will throw the AbstractMethodError during resolve. As a result, we'll
997 // get a more detailed error message.
998 return SharedRuntime::get_handle_wrong_method_stub();
999 } else {
1000 Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
1001 // Assert that the signal comes from the expected location in stub code.
1002 assert(vt_stub->is_null_pointer_exception(pc),
1003 "obtained signal from unexpected location in stub code");
1004 return SharedRuntime::throw_NullPointerException_at_call_entry();
1005 }
1006 } else {
1007 CodeBlob* cb = CodeCache::find_blob(pc);
1008
1009 // If code blob is null, then return null to signal handler to report the SEGV error.
1010 if (cb == nullptr) return nullptr;
1011
1012 // Exception happened in CodeCache. Must be either:
1013 // 1. Inline-cache check in C2I handler blob,
1014 // 2. Inline-cache check in nmethod, or
1015 // 3. Implicit null exception in nmethod
1016
1017 if (!cb->is_nmethod()) {
1018 bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1019 if (!is_in_blob) {
1020 // Allow normal crash reporting to handle this
1021 return nullptr;
1022 }
1023 Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1024 // There is no handler here, so we will simply unwind.
1025 return SharedRuntime::throw_NullPointerException_at_call_entry();
1026 }
1027
1028 // Otherwise, it's a compiled method. Consult its exception handlers.
1029 nmethod* nm = cb->as_nmethod();
1030 if (nm->inlinecache_check_contains(pc)) {
1031 // exception happened inside inline-cache check code
1032 // => the nmethod is not yet active (i.e., the frame
1033 // is not set up yet) => use return address pushed by
1034 // caller => don't push another return address
1035 Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1036 return SharedRuntime::throw_NullPointerException_at_call_entry();
1037 }
1038
1039 if (nm->method()->is_method_handle_intrinsic()) {
1040 // exception happened inside MH dispatch code, similar to a vtable stub
1041 Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1042 return SharedRuntime::throw_NullPointerException_at_call_entry();
1043 }
1044
1045 #ifndef PRODUCT
1046 _implicit_null_throws++;
1047 #endif
1048 target_pc = nm->continuation_for_implicit_null_exception(pc);
1049 // If there's an unexpected fault, target_pc might be null,
1050 // in which case we want to fall through into the normal
1051 // error handling code.
1052 }
1053
1054 break; // fall through
1055 }
1056
1057
1058 case IMPLICIT_DIVIDE_BY_ZERO: {
1059 nmethod* nm = CodeCache::find_nmethod(pc);
1060 guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1061 #ifndef PRODUCT
1062 _implicit_div0_throws++;
1063 #endif
1064 target_pc = nm->continuation_for_implicit_div0_exception(pc);
1065 // If there's an unexpected fault, target_pc might be null,
1066 // in which case we want to fall through into the normal
1067 // error handling code.
1068 break; // fall through
1069 }
1070
1071 default: ShouldNotReachHere();
1072 }
1073
1074 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1075
1076 if (exception_kind == IMPLICIT_NULL) {
1077 #ifndef PRODUCT
1078 // for AbortVMOnException flag
1079 Exceptions::debug_check_abort("java.lang.NullPointerException");
1080 #endif //PRODUCT
1081 Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1082 } else {
1083 #ifndef PRODUCT
1084 // for AbortVMOnException flag
1085 Exceptions::debug_check_abort("java.lang.ArithmeticException");
1086 #endif //PRODUCT
1087 Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1088 }
1089 return target_pc;
1090 }
1091
1092 ShouldNotReachHere();
1093 return nullptr;
1094 }
1095
1096
1097 /**
1098 * Throws an java/lang/UnsatisfiedLinkError. The address of this method is
1099 * installed in the native function entry of all native Java methods before
1100 * they get linked to their actual native methods.
1101 *
1102 * \note
1103 * This method actually never gets called! The reason is because
1104 * the interpreter's native entries call NativeLookup::lookup() which
1105 * throws the exception when the lookup fails. The exception is then
1106 * caught and forwarded on the return from NativeLookup::lookup() call
1107 * before the call to the native function. This might change in the future.
1108 */
1109 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1110 {
1111 // We return a bad value here to make sure that the exception is
1112 // forwarded before we look at the return value.
1113 THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1114 }
1115 JNI_END
1116
1117 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1118 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1119 }
1120
1121 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1122 #if INCLUDE_JVMCI
1123 if (!obj->klass()->has_finalizer()) {
1124 return;
1125 }
1126 #endif // INCLUDE_JVMCI
1127 assert(oopDesc::is_oop(obj), "must be a valid oop");
1128 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1129 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1130 JRT_END
1131
1132 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1133 assert(thread != nullptr, "No thread");
1134 if (thread == nullptr) {
1135 return 0;
1136 }
1137 guarantee(Thread::current() != thread || thread->is_oop_safe(),
1138 "current cannot touch oops after its GC barrier is detached.");
1139 oop obj = thread->threadObj();
1140 return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1141 }
1142
1143 /**
1144 * This function ought to be a void function, but cannot be because
1145 * it gets turned into a tail-call on sparc, which runs into dtrace bug
1146 * 6254741. Once that is fixed we can remove the dummy return value.
1147 */
1148 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1149 return dtrace_object_alloc(JavaThread::current(), o, o->size());
1150 }
1151
1152 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1153 return dtrace_object_alloc(thread, o, o->size());
1154 }
1155
1156 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1157 assert(DTraceAllocProbes, "wrong call");
1158 Klass* klass = o->klass();
1159 Symbol* name = klass->name();
1160 HOTSPOT_OBJECT_ALLOC(
1161 get_java_tid(thread),
1162 (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1163 return 0;
1164 }
1165
1166 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1167 JavaThread* current, Method* method))
1168 assert(current == JavaThread::current(), "pre-condition");
1169
1170 assert(DTraceMethodProbes, "wrong call");
1171 Symbol* kname = method->klass_name();
1172 Symbol* name = method->name();
1173 Symbol* sig = method->signature();
1174 HOTSPOT_METHOD_ENTRY(
1175 get_java_tid(current),
1176 (char *) kname->bytes(), kname->utf8_length(),
1177 (char *) name->bytes(), name->utf8_length(),
1178 (char *) sig->bytes(), sig->utf8_length());
1179 return 0;
1180 JRT_END
1181
1182 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1183 JavaThread* current, Method* method))
1184 assert(current == JavaThread::current(), "pre-condition");
1185 assert(DTraceMethodProbes, "wrong call");
1186 Symbol* kname = method->klass_name();
1187 Symbol* name = method->name();
1188 Symbol* sig = method->signature();
1189 HOTSPOT_METHOD_RETURN(
1190 get_java_tid(current),
1191 (char *) kname->bytes(), kname->utf8_length(),
1192 (char *) name->bytes(), name->utf8_length(),
1193 (char *) sig->bytes(), sig->utf8_length());
1194 return 0;
1195 JRT_END
1196
1197
1198 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode)
1199 // for a call current in progress, i.e., arguments has been pushed on stack
1200 // put callee has not been invoked yet. Used by: resolve virtual/static,
1201 // vtable updates, etc. Caller frame must be compiled.
1202 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1203 JavaThread* current = THREAD;
1204 ResourceMark rm(current);
1205
1206 // last java frame on stack (which includes native call frames)
1207 vframeStream vfst(current, true); // Do not skip and javaCalls
1208
1209 return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1210 }
1211
1212 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1213 nmethod* caller = vfst.nm();
1214
1215 address pc = vfst.frame_pc();
1216 { // Get call instruction under lock because another thread may be busy patching it.
1217 CompiledICLocker ic_locker(caller);
1218 return caller->attached_method_before_pc(pc);
1219 }
1220 return nullptr;
1221 }
1222
1223 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
1224 // for a call current in progress, i.e., arguments has been pushed on stack
1225 // but callee has not been invoked yet. Caller frame must be compiled.
1226 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1227 CallInfo& callinfo, TRAPS) {
1228 Handle receiver;
1229 Handle nullHandle; // create a handy null handle for exception returns
1230 JavaThread* current = THREAD;
1231
1232 assert(!vfst.at_end(), "Java frame must exist");
1233
1234 // Find caller and bci from vframe
1235 methodHandle caller(current, vfst.method());
1236 int bci = vfst.bci();
1237
1238 if (caller->is_continuation_enter_intrinsic()) {
1239 bc = Bytecodes::_invokestatic;
1240 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1241 return receiver;
1242 }
1243
1244 // Substitutability test implementation piggy backs on static call resolution
1245 Bytecodes::Code code = caller->java_code_at(bci);
1246 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1247 bc = Bytecodes::_invokestatic;
1248 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1249 assert(attached_method.not_null(), "must have attached method");
1250 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1251 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1252 #ifdef ASSERT
1253 Symbol* subst_method_name = vmSymbols::isSubstitutable_name();
1254 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
1255 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1256 #endif
1257 return receiver;
1258 }
1259
1260 Bytecode_invoke bytecode(caller, bci);
1261 int bytecode_index = bytecode.index();
1262 bc = bytecode.invoke_code();
1263
1264 methodHandle attached_method(current, extract_attached_method(vfst));
1265 if (attached_method.not_null()) {
1266 Method* callee = bytecode.static_target(CHECK_NH);
1267 vmIntrinsics::ID id = callee->intrinsic_id();
1268 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1269 // it attaches statically resolved method to the call site.
1270 if (MethodHandles::is_signature_polymorphic(id) &&
1271 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1272 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1273
1274 // Adjust invocation mode according to the attached method.
1275 switch (bc) {
1276 case Bytecodes::_invokevirtual:
1277 if (attached_method->method_holder()->is_interface()) {
1278 bc = Bytecodes::_invokeinterface;
1279 }
1280 break;
1281 case Bytecodes::_invokeinterface:
1282 if (!attached_method->method_holder()->is_interface()) {
1283 bc = Bytecodes::_invokevirtual;
1284 }
1285 break;
1286 case Bytecodes::_invokehandle:
1287 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1288 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1289 : Bytecodes::_invokevirtual;
1290 }
1291 break;
1292 default:
1293 break;
1294 }
1295 } else {
1296 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1297 if (!attached_method->method_holder()->is_inline_klass()) {
1298 // Ignore the attached method in this case to not confuse below code
1299 attached_method = methodHandle(current, nullptr);
1300 }
1301 }
1302 }
1303
1304 assert(bc != Bytecodes::_illegal, "not initialized");
1305
1306 bool has_receiver = bc != Bytecodes::_invokestatic &&
1307 bc != Bytecodes::_invokedynamic &&
1308 bc != Bytecodes::_invokehandle;
1309 bool check_null_and_abstract = true;
1310
1311 // Find receiver for non-static call
1312 if (has_receiver) {
1313 // This register map must be update since we need to find the receiver for
1314 // compiled frames. The receiver might be in a register.
1315 RegisterMap reg_map2(current,
1316 RegisterMap::UpdateMap::include,
1317 RegisterMap::ProcessFrames::include,
1318 RegisterMap::WalkContinuation::skip);
1319 frame stubFrame = current->last_frame();
1320 // Caller-frame is a compiled frame
1321 frame callerFrame = stubFrame.sender(®_map2);
1322
1323 Method* callee = attached_method();
1324 if (callee == nullptr) {
1325 callee = bytecode.static_target(CHECK_NH);
1326 if (callee == nullptr) {
1327 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1328 }
1329 }
1330 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1331 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1332 // If the receiver is an inline type that is passed as fields, no oop is available
1333 // Resolve the call without receiver null checking.
1334 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1335 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1336 if (bc == Bytecodes::_invokeinterface) {
1337 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1338 }
1339 check_null_and_abstract = false;
1340 } else {
1341 // Retrieve from a compiled argument list
1342 receiver = Handle(current, callerFrame.retrieve_receiver(®_map2));
1343 assert(oopDesc::is_oop_or_null(receiver()), "");
1344 if (receiver.is_null()) {
1345 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1346 }
1347 }
1348 }
1349
1350 // Resolve method
1351 if (attached_method.not_null()) {
1352 // Parameterized by attached method.
1353 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1354 } else {
1355 // Parameterized by bytecode.
1356 constantPoolHandle constants(current, caller->constants());
1357 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1358 }
1359
1360 #ifdef ASSERT
1361 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1362 if (has_receiver && check_null_and_abstract) {
1363 assert(receiver.not_null(), "should have thrown exception");
1364 Klass* receiver_klass = receiver->klass();
1365 Klass* rk = nullptr;
1366 if (attached_method.not_null()) {
1367 // In case there's resolved method attached, use its holder during the check.
1368 rk = attached_method->method_holder();
1369 } else {
1370 // Klass is already loaded.
1371 constantPoolHandle constants(current, caller->constants());
1372 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1373 }
1374 Klass* static_receiver_klass = rk;
1375 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1376 "actual receiver must be subclass of static receiver klass");
1377 if (receiver_klass->is_instance_klass()) {
1378 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1379 tty->print_cr("ERROR: Klass not yet initialized!!");
1380 receiver_klass->print();
1381 }
1382 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1383 }
1384 }
1385 #endif
1386
1387 return receiver;
1388 }
1389
1390 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1391 JavaThread* current = THREAD;
1392 ResourceMark rm(current);
1393 // We need first to check if any Java activations (compiled, interpreted)
1394 // exist on the stack since last JavaCall. If not, we need
1395 // to get the target method from the JavaCall wrapper.
1396 vframeStream vfst(current, true); // Do not skip any javaCalls
1397 methodHandle callee_method;
1398 if (vfst.at_end()) {
1399 // No Java frames were found on stack since we did the JavaCall.
1400 // Hence the stack can only contain an entry_frame. We need to
1401 // find the target method from the stub frame.
1402 RegisterMap reg_map(current,
1403 RegisterMap::UpdateMap::skip,
1404 RegisterMap::ProcessFrames::include,
1405 RegisterMap::WalkContinuation::skip);
1406 frame fr = current->last_frame();
1407 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1408 fr = fr.sender(®_map);
1409 assert(fr.is_entry_frame(), "must be");
1410 // fr is now pointing to the entry frame.
1411 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1412 } else {
1413 Bytecodes::Code bc;
1414 CallInfo callinfo;
1415 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1416 // Calls via mismatching methods are always non-scalarized
1417 if (callinfo.resolved_method()->mismatch()) {
1418 caller_does_not_scalarize = true;
1419 }
1420 callee_method = methodHandle(current, callinfo.selected_method());
1421 }
1422 assert(callee_method()->is_method(), "must be");
1423 return callee_method;
1424 }
1425
1426 // Resolves a call.
1427 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1428 JavaThread* current = THREAD;
1429 ResourceMark rm(current);
1430 RegisterMap cbl_map(current,
1431 RegisterMap::UpdateMap::skip,
1432 RegisterMap::ProcessFrames::include,
1433 RegisterMap::WalkContinuation::skip);
1434 frame caller_frame = current->last_frame().sender(&cbl_map);
1435
1436 CodeBlob* caller_cb = caller_frame.cb();
1437 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1438 nmethod* caller_nm = caller_cb->as_nmethod();
1439
1440 // determine call info & receiver
1441 // note: a) receiver is null for static calls
1442 // b) an exception is thrown if receiver is null for non-static calls
1443 CallInfo call_info;
1444 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1445 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1446
1447 NoSafepointVerifier nsv;
1448
1449 methodHandle callee_method(current, call_info.selected_method());
1450 // Calls via mismatching methods are always non-scalarized
1451 bool mismatch = is_optimized ? call_info.selected_method()->mismatch() : call_info.resolved_method()->mismatch();
1452 if (caller_nm->is_compiled_by_c1() || mismatch) {
1453 caller_does_not_scalarize = true;
1454 }
1455
1456 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1457 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1458 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1459 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1460 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1461
1462 assert(!caller_nm->is_unloading(), "It should not be unloading");
1463
1464 #ifndef PRODUCT
1465 // tracing/debugging/statistics
1466 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1467 (is_virtual) ? (&_resolve_virtual_ctr) :
1468 (&_resolve_static_ctr);
1469 AtomicAccess::inc(addr);
1470
1471 if (TraceCallFixup) {
1472 ResourceMark rm(current);
1473 tty->print("resolving %s%s (%s) %s call to",
1474 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1475 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1476 callee_method->print_short_name(tty);
1477 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1478 p2i(caller_frame.pc()), p2i(callee_method->code()));
1479 }
1480 #endif
1481
1482 if (invoke_code == Bytecodes::_invokestatic) {
1483 assert(callee_method->method_holder()->is_initialized() ||
1484 callee_method->method_holder()->is_reentrant_initialization(current),
1485 "invalid class initialization state for invoke_static");
1486 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1487 // In order to keep class initialization check, do not patch call
1488 // site for static call when the class is not fully initialized.
1489 // Proper check is enforced by call site re-resolution on every invocation.
1490 //
1491 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1492 // explicit class initialization check is put in nmethod entry (VEP).
1493 assert(callee_method->method_holder()->is_linked(), "must be");
1494 return callee_method;
1495 }
1496 }
1497
1498
1499 // JSR 292 key invariant:
1500 // If the resolved method is a MethodHandle invoke target, the call
1501 // site must be a MethodHandle call site, because the lambda form might tail-call
1502 // leaving the stack in a state unknown to either caller or callee
1503
1504 // Compute entry points. The computation of the entry points is independent of
1505 // patching the call.
1506
1507 // Make sure the callee nmethod does not get deoptimized and removed before
1508 // we are done patching the code.
1509
1510
1511 CompiledICLocker ml(caller_nm);
1512 if (is_virtual && !is_optimized) {
1513 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1514 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1515 } else {
1516 // Callsite is a direct call - set it to the destination method
1517 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1518 callsite->set(callee_method, caller_does_not_scalarize);
1519 }
1520
1521 return callee_method;
1522 }
1523
1524 // Inline caches exist only in compiled code
1525 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1526 #ifdef ASSERT
1527 RegisterMap reg_map(current,
1528 RegisterMap::UpdateMap::skip,
1529 RegisterMap::ProcessFrames::include,
1530 RegisterMap::WalkContinuation::skip);
1531 frame stub_frame = current->last_frame();
1532 assert(stub_frame.is_runtime_frame(), "sanity check");
1533 frame caller_frame = stub_frame.sender(®_map);
1534 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1535 #endif /* ASSERT */
1536
1537 methodHandle callee_method;
1538 bool caller_does_not_scalarize = false;
1539 JRT_BLOCK
1540 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1541 // Return Method* through TLS
1542 current->set_vm_result_metadata(callee_method());
1543 JRT_BLOCK_END
1544 // return compiled code entry point after potential safepoints
1545 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1546 JRT_END
1547
1548
1549 // Handle call site that has been made non-entrant
1550 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1551 // 6243940 We might end up in here if the callee is deoptimized
1552 // as we race to call it. We don't want to take a safepoint if
1553 // the caller was interpreted because the caller frame will look
1554 // interpreted to the stack walkers and arguments are now
1555 // "compiled" so it is much better to make this transition
1556 // invisible to the stack walking code. The i2c path will
1557 // place the callee method in the callee_target. It is stashed
1558 // there because if we try and find the callee by normal means a
1559 // safepoint is possible and have trouble gc'ing the compiled args.
1560 RegisterMap reg_map(current,
1561 RegisterMap::UpdateMap::skip,
1562 RegisterMap::ProcessFrames::include,
1563 RegisterMap::WalkContinuation::skip);
1564 frame stub_frame = current->last_frame();
1565 assert(stub_frame.is_runtime_frame(), "sanity check");
1566 frame caller_frame = stub_frame.sender(®_map);
1567
1568 if (caller_frame.is_interpreted_frame() ||
1569 caller_frame.is_entry_frame() ||
1570 caller_frame.is_upcall_stub_frame()) {
1571 Method* callee = current->callee_target();
1572 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1573 current->set_vm_result_metadata(callee);
1574 current->set_callee_target(nullptr);
1575 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1576 // Bypass class initialization checks in c2i when caller is in native.
1577 // JNI calls to static methods don't have class initialization checks.
1578 // Fast class initialization checks are present in c2i adapters and call into
1579 // SharedRuntime::handle_wrong_method() on the slow path.
1580 //
1581 // JVM upcalls may land here as well, but there's a proper check present in
1582 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1583 // so bypassing it in c2i adapter is benign.
1584 return callee->get_c2i_no_clinit_check_entry();
1585 } else {
1586 if (caller_frame.is_interpreted_frame()) {
1587 return callee->get_c2i_inline_entry();
1588 } else {
1589 return callee->get_c2i_entry();
1590 }
1591 }
1592 }
1593
1594 // Must be compiled to compiled path which is safe to stackwalk
1595 methodHandle callee_method;
1596 bool is_static_call = false;
1597 bool is_optimized = false;
1598 bool caller_does_not_scalarize = false;
1599 JRT_BLOCK
1600 // Force resolving of caller (if we called from compiled frame)
1601 callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1602 current->set_vm_result_metadata(callee_method());
1603 JRT_BLOCK_END
1604 // return compiled code entry point after potential safepoints
1605 return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1606 JRT_END
1607
1608 // Handle abstract method call
1609 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1610 // Verbose error message for AbstractMethodError.
1611 // Get the called method from the invoke bytecode.
1612 vframeStream vfst(current, true);
1613 assert(!vfst.at_end(), "Java frame must exist");
1614 methodHandle caller(current, vfst.method());
1615 Bytecode_invoke invoke(caller, vfst.bci());
1616 DEBUG_ONLY( invoke.verify(); )
1617
1618 // Find the compiled caller frame.
1619 RegisterMap reg_map(current,
1620 RegisterMap::UpdateMap::include,
1621 RegisterMap::ProcessFrames::include,
1622 RegisterMap::WalkContinuation::skip);
1623 frame stubFrame = current->last_frame();
1624 assert(stubFrame.is_runtime_frame(), "must be");
1625 frame callerFrame = stubFrame.sender(®_map);
1626 assert(callerFrame.is_compiled_frame(), "must be");
1627
1628 // Install exception and return forward entry.
1629 address res = SharedRuntime::throw_AbstractMethodError_entry();
1630 JRT_BLOCK
1631 methodHandle callee(current, invoke.static_target(current));
1632 if (!callee.is_null()) {
1633 oop recv = callerFrame.retrieve_receiver(®_map);
1634 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1635 res = StubRoutines::forward_exception_entry();
1636 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1637 }
1638 JRT_BLOCK_END
1639 return res;
1640 JRT_END
1641
1642 // return verified_code_entry if interp_only_mode is not set for the current thread;
1643 // otherwise return c2i entry.
1644 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1645 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1646 bool is_interp_only_mode = (StressCallingConvention && (os::random() % (1 << 10)) == 0) || current->is_interp_only_mode();
1647 // In interp_only_mode we need to go to the interpreted entry
1648 // The c2i won't patch in this mode -- see fixup_callers_callsite
1649 bool go_to_interpreter = is_interp_only_mode && !callee_method->is_special_native_intrinsic();
1650
1651 if (caller_does_not_scalarize) {
1652 if (go_to_interpreter) {
1653 return callee_method->get_c2i_inline_entry();
1654 }
1655 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1656 return callee_method->verified_inline_code_entry();
1657 } else if (is_static_call || is_optimized) {
1658 if (go_to_interpreter) {
1659 return callee_method->get_c2i_entry();
1660 }
1661 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1662 return callee_method->verified_code_entry();
1663 } else {
1664 if (go_to_interpreter) {
1665 return callee_method->get_c2i_inline_ro_entry();
1666 }
1667 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1668 return callee_method->verified_inline_ro_code_entry();
1669 }
1670 }
1671
1672 // resolve a static call and patch code
1673 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1674 methodHandle callee_method;
1675 bool caller_does_not_scalarize = false;
1676 bool enter_special = false;
1677 JRT_BLOCK
1678 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1679 current->set_vm_result_metadata(callee_method());
1680 JRT_BLOCK_END
1681 // return compiled code entry point after potential safepoints
1682 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1683 JRT_END
1684
1685 // resolve virtual call and update inline cache to monomorphic
1686 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1687 methodHandle callee_method;
1688 bool caller_does_not_scalarize = false;
1689 JRT_BLOCK
1690 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1691 current->set_vm_result_metadata(callee_method());
1692 JRT_BLOCK_END
1693 // return compiled code entry point after potential safepoints
1694 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1695 JRT_END
1696
1697
1698 // Resolve a virtual call that can be statically bound (e.g., always
1699 // monomorphic, so it has no inline cache). Patch code to resolved target.
1700 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1701 methodHandle callee_method;
1702 bool caller_does_not_scalarize = false;
1703 JRT_BLOCK
1704 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1705 current->set_vm_result_metadata(callee_method());
1706 JRT_BLOCK_END
1707 // return compiled code entry point after potential safepoints
1708 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1709 JRT_END
1710
1711 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1712 JavaThread* current = THREAD;
1713 ResourceMark rm(current);
1714 CallInfo call_info;
1715 Bytecodes::Code bc;
1716
1717 // receiver is null for static calls. An exception is thrown for null
1718 // receivers for non-static calls
1719 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1720
1721 methodHandle callee_method(current, call_info.selected_method());
1722
1723 #ifndef PRODUCT
1724 AtomicAccess::inc(&_ic_miss_ctr);
1725
1726 // Statistics & Tracing
1727 if (TraceCallFixup) {
1728 ResourceMark rm(current);
1729 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1730 callee_method->print_short_name(tty);
1731 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1732 }
1733
1734 if (ICMissHistogram) {
1735 MutexLocker m(VMStatistic_lock);
1736 RegisterMap reg_map(current,
1737 RegisterMap::UpdateMap::skip,
1738 RegisterMap::ProcessFrames::include,
1739 RegisterMap::WalkContinuation::skip);
1740 frame f = current->last_frame().real_sender(®_map);// skip runtime stub
1741 // produce statistics under the lock
1742 trace_ic_miss(f.pc());
1743 }
1744 #endif
1745
1746 // install an event collector so that when a vtable stub is created the
1747 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1748 // event can't be posted when the stub is created as locks are held
1749 // - instead the event will be deferred until the event collector goes
1750 // out of scope.
1751 JvmtiDynamicCodeEventCollector event_collector;
1752
1753 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1754 RegisterMap reg_map(current,
1755 RegisterMap::UpdateMap::skip,
1756 RegisterMap::ProcessFrames::include,
1757 RegisterMap::WalkContinuation::skip);
1758 frame caller_frame = current->last_frame().sender(®_map);
1759 CodeBlob* cb = caller_frame.cb();
1760 nmethod* caller_nm = cb->as_nmethod();
1761 // Calls via mismatching methods are always non-scalarized
1762 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1763 caller_does_not_scalarize = true;
1764 }
1765
1766 CompiledICLocker ml(caller_nm);
1767 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1768 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1769
1770 return callee_method;
1771 }
1772
1773 //
1774 // Resets a call-site in compiled code so it will get resolved again.
1775 // This routines handles both virtual call sites, optimized virtual call
1776 // sites, and static call sites. Typically used to change a call sites
1777 // destination from compiled to interpreted.
1778 //
1779 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1780 JavaThread* current = THREAD;
1781 ResourceMark rm(current);
1782 RegisterMap reg_map(current,
1783 RegisterMap::UpdateMap::skip,
1784 RegisterMap::ProcessFrames::include,
1785 RegisterMap::WalkContinuation::skip);
1786 frame stub_frame = current->last_frame();
1787 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1788 frame caller = stub_frame.sender(®_map);
1789 if (caller.is_compiled_frame()) {
1790 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1791 }
1792 assert(!caller.is_interpreted_frame(), "must be compiled");
1793
1794 // If the frame isn't a live compiled frame (i.e. deoptimized by the time we get here), no IC clearing must be done
1795 // for the caller. However, when the caller is C2 compiled and the callee a C1 or C2 compiled method, then we still
1796 // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1797 // using the wrong method entry point and accidentally skip the buffering of the receiver.
1798 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1799 const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1800 const bool caller_is_continuation_enter_intrinsic =
1801 caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1802 const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1803
1804 const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1805 !callee_method()->is_static() &&
1806 callee_method()->is_scalarized_arg(0);
1807 const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1808
1809 if (do_IC_clearing || compute_is_optimized) {
1810 address pc = caller.pc();
1811
1812 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1813 assert(caller_nm != nullptr, "did not find caller nmethod");
1814
1815 // Default call_addr is the location of the "basic" call.
1816 // Determine the address of the call we a reresolving. With
1817 // Inline Caches we will always find a recognizable call.
1818 // With Inline Caches disabled we may or may not find a
1819 // recognizable call. We will always find a call for static
1820 // calls and for optimized virtual calls. For vanilla virtual
1821 // calls it depends on the state of the UseInlineCaches switch.
1822 //
1823 // With Inline Caches disabled we can get here for a virtual call
1824 // for two reasons:
1825 // 1 - calling an abstract method. The vtable for abstract methods
1826 // will run us thru handle_wrong_method and we will eventually
1827 // end up in the interpreter to throw the ame.
1828 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1829 // call and between the time we fetch the entry address and
1830 // we jump to it the target gets deoptimized. Similar to 1
1831 // we will wind up in the interprter (thru a c2i with c2).
1832 //
1833 CompiledICLocker ml(caller_nm);
1834 address call_addr = caller_nm->call_instruction_address(pc);
1835
1836 if (call_addr != nullptr) {
1837 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1838 // bytes back in the instruction stream so we must also check for reloc info.
1839 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1840 bool ret = iter.next(); // Get item
1841 if (ret) {
1842 is_optimized = false;
1843 switch (iter.type()) {
1844 case relocInfo::static_call_type:
1845 assert(callee_method->is_static(), "must be");
1846 case relocInfo::opt_virtual_call_type: {
1847 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1848 if (do_IC_clearing) {
1849 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1850 cdc->set_to_clean();
1851 }
1852 break;
1853 }
1854
1855 case relocInfo::virtual_call_type: {
1856 if (do_IC_clearing) {
1857 // compiled, dispatched call (which used to call an interpreted method)
1858 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1859 inline_cache->set_to_clean();
1860 }
1861 break;
1862 }
1863 default:
1864 break;
1865 }
1866 }
1867 }
1868 }
1869
1870 #ifndef PRODUCT
1871 AtomicAccess::inc(&_wrong_method_ctr);
1872
1873 if (TraceCallFixup) {
1874 ResourceMark rm(current);
1875 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1876 callee_method->print_short_name(tty);
1877 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1878 }
1879 #endif
1880
1881 return callee_method;
1882 }
1883
1884 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1885 // The faulting unsafe accesses should be changed to throw the error
1886 // synchronously instead. Meanwhile the faulting instruction will be
1887 // skipped over (effectively turning it into a no-op) and an
1888 // asynchronous exception will be raised which the thread will
1889 // handle at a later point. If the instruction is a load it will
1890 // return garbage.
1891
1892 // Request an async exception.
1893 thread->set_pending_unsafe_access_error();
1894
1895 // Return address of next instruction to execute.
1896 return next_pc;
1897 }
1898
1899 #ifdef ASSERT
1900 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1901 const BasicType* sig_bt,
1902 const VMRegPair* regs) {
1903 ResourceMark rm;
1904 const int total_args_passed = method->size_of_parameters();
1905 const VMRegPair* regs_with_member_name = regs;
1906 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1907
1908 const int member_arg_pos = total_args_passed - 1;
1909 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1910 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1911
1912 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1913
1914 for (int i = 0; i < member_arg_pos; i++) {
1915 VMReg a = regs_with_member_name[i].first();
1916 VMReg b = regs_without_member_name[i].first();
1917 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1918 }
1919 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1920 }
1921 #endif
1922
1923 // ---------------------------------------------------------------------------
1924 // We are calling the interpreter via a c2i. Normally this would mean that
1925 // we were called by a compiled method. However we could have lost a race
1926 // where we went int -> i2c -> c2i and so the caller could in fact be
1927 // interpreted. If the caller is compiled we attempt to patch the caller
1928 // so he no longer calls into the interpreter.
1929 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1930 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1931
1932 // It's possible that deoptimization can occur at a call site which hasn't
1933 // been resolved yet, in which case this function will be called from
1934 // an nmethod that has been patched for deopt and we can ignore the
1935 // request for a fixup.
1936 // Also it is possible that we lost a race in that from_compiled_entry
1937 // is now back to the i2c in that case we don't need to patch and if
1938 // we did we'd leap into space because the callsite needs to use
1939 // "to interpreter" stub in order to load up the Method*. Don't
1940 // ask me how I know this...
1941
1942 // Result from nmethod::is_unloading is not stable across safepoints.
1943 NoSafepointVerifier nsv;
1944
1945 nmethod* callee = method->code();
1946 if (callee == nullptr) {
1947 return;
1948 }
1949
1950 // write lock needed because we might patch call site by set_to_clean()
1951 // and is_unloading() can modify nmethod's state
1952 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1953
1954 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1955 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1956 return;
1957 }
1958
1959 // The check above makes sure this is an nmethod.
1960 nmethod* caller = cb->as_nmethod();
1961
1962 // Get the return PC for the passed caller PC.
1963 address return_pc = caller_pc + frame::pc_return_offset;
1964
1965 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1966 return;
1967 }
1968
1969 // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1970 CompiledICLocker ic_locker(caller);
1971 ResourceMark rm;
1972
1973 // If we got here through a static call or opt_virtual call, then we know where the
1974 // call address would be; let's peek at it
1975 address callsite_addr = (address)nativeCall_before(return_pc);
1976 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1977 if (!iter.next()) {
1978 // No reloc entry found; not a static or optimized virtual call
1979 return;
1980 }
1981
1982 relocInfo::relocType type = iter.reloc()->type();
1983 if (type != relocInfo::static_call_type &&
1984 type != relocInfo::opt_virtual_call_type) {
1985 return;
1986 }
1987
1988 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1989 callsite->set_to_clean();
1990 JRT_END
1991
1992
1993 // same as JVM_Arraycopy, but called directly from compiled code
1994 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1995 oopDesc* dest, jint dest_pos,
1996 jint length,
1997 JavaThread* current)) {
1998 #ifndef PRODUCT
1999 _slow_array_copy_ctr++;
2000 #endif
2001 // Check if we have null pointers
2002 if (src == nullptr || dest == nullptr) {
2003 THROW(vmSymbols::java_lang_NullPointerException());
2004 }
2005 // Do the copy. The casts to arrayOop are necessary to the copy_array API,
2006 // even though the copy_array API also performs dynamic checks to ensure
2007 // that src and dest are truly arrays (and are conformable).
2008 // The copy_array mechanism is awkward and could be removed, but
2009 // the compilers don't call this function except as a last resort,
2010 // so it probably doesn't matter.
2011 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
2012 (arrayOopDesc*)dest, dest_pos,
2013 length, current);
2014 }
2015 JRT_END
2016
2017 // The caller of generate_class_cast_message() (or one of its callers)
2018 // must use a ResourceMark in order to correctly free the result.
2019 char* SharedRuntime::generate_class_cast_message(
2020 JavaThread* thread, Klass* caster_klass) {
2021
2022 // Get target class name from the checkcast instruction
2023 vframeStream vfst(thread, true);
2024 assert(!vfst.at_end(), "Java frame must exist");
2025 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
2026 constantPoolHandle cpool(thread, vfst.method()->constants());
2027 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
2028 Symbol* target_klass_name = nullptr;
2029 if (target_klass == nullptr) {
2030 // This klass should be resolved, but just in case, get the name in the klass slot.
2031 target_klass_name = cpool->klass_name_at(cc.index());
2032 }
2033 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
2034 }
2035
2036
2037 // The caller of generate_class_cast_message() (or one of its callers)
2038 // must use a ResourceMark in order to correctly free the result.
2039 char* SharedRuntime::generate_class_cast_message(
2040 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
2041 const char* caster_name = caster_klass->external_name();
2042
2043 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
2044 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
2045 target_klass->external_name();
2046
2047 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
2048
2049 const char* caster_klass_description = "";
2050 const char* target_klass_description = "";
2051 const char* klass_separator = "";
2052 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
2053 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
2054 } else {
2055 caster_klass_description = caster_klass->class_in_module_of_loader();
2056 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
2057 klass_separator = (target_klass != nullptr) ? "; " : "";
2058 }
2059
2060 // add 3 for parenthesis and preceding space
2061 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2062
2063 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2064 if (message == nullptr) {
2065 // Shouldn't happen, but don't cause even more problems if it does
2066 message = const_cast<char*>(caster_klass->external_name());
2067 } else {
2068 jio_snprintf(message,
2069 msglen,
2070 "class %s cannot be cast to class %s (%s%s%s)",
2071 caster_name,
2072 target_name,
2073 caster_klass_description,
2074 klass_separator,
2075 target_klass_description
2076 );
2077 }
2078 return message;
2079 }
2080
2081 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2082 assert(klass->is_inline_klass(), "Must be a concrete value class");
2083 const char* desc = "Cannot synchronize on an instance of value class ";
2084 const char* className = klass->external_name();
2085 size_t msglen = strlen(desc) + strlen(className) + 1;
2086 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2087 if (nullptr == message) {
2088 // Out of memory: can't create detailed error message
2089 message = const_cast<char*>(klass->external_name());
2090 } else {
2091 jio_snprintf(message, msglen, "%s%s", desc, className);
2092 }
2093 return message;
2094 }
2095
2096 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2097 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2098 JRT_END
2099
2100 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2101 if (!SafepointSynchronize::is_synchronizing()) {
2102 // Only try quick_enter() if we're not trying to reach a safepoint
2103 // so that the calling thread reaches the safepoint more quickly.
2104 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2105 return;
2106 }
2107 }
2108 // NO_ASYNC required because an async exception on the state transition destructor
2109 // would leave you with the lock held and it would never be released.
2110 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2111 // and the model is that an exception implies the method failed.
2112 JRT_BLOCK_NO_ASYNC
2113 Handle h_obj(THREAD, obj);
2114 ObjectSynchronizer::enter(h_obj, lock, current);
2115 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2116 JRT_BLOCK_END
2117 }
2118
2119 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2120 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2121 SharedRuntime::monitor_enter_helper(obj, lock, current);
2122 JRT_END
2123
2124 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2125 assert(JavaThread::current() == current, "invariant");
2126 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2127 ExceptionMark em(current);
2128
2129 // Check if C2_MacroAssembler::fast_unlock() or
2130 // C2_MacroAssembler::fast_unlock() unlocked an inflated
2131 // monitor before going slow path. Since there is no safepoint
2132 // polling when calling into the VM, we can be sure that the monitor
2133 // hasn't been deallocated.
2134 ObjectMonitor* m = current->unlocked_inflated_monitor();
2135 if (m != nullptr) {
2136 assert(!m->has_owner(current), "must be");
2137 current->clear_unlocked_inflated_monitor();
2138
2139 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2140 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2141 // Some other thread acquired the lock (or the monitor was
2142 // deflated). Either way we are done.
2143 return;
2144 }
2145 }
2146
2147 // The object could become unlocked through a JNI call, which we have no other checks for.
2148 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2149 if (obj->is_unlocked()) {
2150 if (CheckJNICalls) {
2151 fatal("Object has been unlocked by JNI");
2152 }
2153 return;
2154 }
2155 ObjectSynchronizer::exit(obj, lock, current);
2156 }
2157
2158 // Handles the uncommon cases of monitor unlocking in compiled code
2159 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2160 assert(current == JavaThread::current(), "pre-condition");
2161 SharedRuntime::monitor_exit_helper(obj, lock, current);
2162 JRT_END
2163
2164 #ifndef PRODUCT
2165
2166 void SharedRuntime::print_statistics() {
2167 ttyLocker ttyl;
2168 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2169
2170 SharedRuntime::print_ic_miss_histogram();
2171
2172 // Dump the JRT_ENTRY counters
2173 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2174 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2175 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2176 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2177 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2178 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2179
2180 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2181 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2182 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2183 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2184 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2185
2186 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2187 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2188 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2189 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2190 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2191 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2192 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2193 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2194 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2195 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2196 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2197 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2198 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2199 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2200 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2201 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2202 if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2203
2204 AdapterHandlerLibrary::print_statistics();
2205
2206 if (xtty != nullptr) xtty->tail("statistics");
2207 }
2208
2209 inline double percent(int64_t x, int64_t y) {
2210 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2211 }
2212
2213 class MethodArityHistogram {
2214 public:
2215 enum { MAX_ARITY = 256 };
2216 private:
2217 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2218 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2219 static uint64_t _total_compiled_calls;
2220 static uint64_t _max_compiled_calls_per_method;
2221 static int _max_arity; // max. arity seen
2222 static int _max_size; // max. arg size seen
2223
2224 static void add_method_to_histogram(nmethod* nm) {
2225 Method* method = (nm == nullptr) ? nullptr : nm->method();
2226 if (method != nullptr) {
2227 ArgumentCount args(method->signature());
2228 int arity = args.size() + (method->is_static() ? 0 : 1);
2229 int argsize = method->size_of_parameters();
2230 arity = MIN2(arity, MAX_ARITY-1);
2231 argsize = MIN2(argsize, MAX_ARITY-1);
2232 uint64_t count = (uint64_t)method->compiled_invocation_count();
2233 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2234 _total_compiled_calls += count;
2235 _arity_histogram[arity] += count;
2236 _size_histogram[argsize] += count;
2237 _max_arity = MAX2(_max_arity, arity);
2238 _max_size = MAX2(_max_size, argsize);
2239 }
2240 }
2241
2242 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2243 const int N = MIN2(9, n);
2244 double sum = 0;
2245 double weighted_sum = 0;
2246 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2247 if (sum >= 1) { // prevent divide by zero or divide overflow
2248 double rest = sum;
2249 double percent = sum / 100;
2250 for (int i = 0; i <= N; i++) {
2251 rest -= (double)histo[i];
2252 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2253 }
2254 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2255 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2256 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2257 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2258 } else {
2259 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2260 }
2261 }
2262
2263 void print_histogram() {
2264 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2265 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2266 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2267 print_histogram_helper(_max_size, _size_histogram, "size");
2268 tty->cr();
2269 }
2270
2271 public:
2272 MethodArityHistogram() {
2273 // Take the Compile_lock to protect against changes in the CodeBlob structures
2274 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2275 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2276 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2277 _max_arity = _max_size = 0;
2278 _total_compiled_calls = 0;
2279 _max_compiled_calls_per_method = 0;
2280 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2281 CodeCache::nmethods_do(add_method_to_histogram);
2282 print_histogram();
2283 }
2284 };
2285
2286 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2287 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2288 uint64_t MethodArityHistogram::_total_compiled_calls;
2289 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2290 int MethodArityHistogram::_max_arity;
2291 int MethodArityHistogram::_max_size;
2292
2293 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2294 tty->print_cr("Calls from compiled code:");
2295 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2296 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2297 int64_t mono_i = _nof_interface_calls;
2298 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2299 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2300 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2301 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2302 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2303 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2304 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2305 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2306 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2307 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2308 tty->cr();
2309 tty->print_cr("Note 1: counter updates are not MT-safe.");
2310 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2311 tty->print_cr(" %% in nested categories are relative to their category");
2312 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2313 tty->cr();
2314
2315 MethodArityHistogram h;
2316 }
2317 #endif
2318
2319 #ifndef PRODUCT
2320 static int _lookups; // number of calls to lookup
2321 static int _equals; // number of buckets checked with matching hash
2322 static int _archived_hits; // number of successful lookups in archived table
2323 static int _runtime_hits; // number of successful lookups in runtime table
2324 #endif
2325
2326 // A simple wrapper class around the calling convention information
2327 // that allows sharing of adapters for the same calling convention.
2328 class AdapterFingerPrint : public MetaspaceObj {
2329 public:
2330 class Element {
2331 private:
2332 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2333 // field if it is flattened in the calling convention, -1 otherwise.
2334 juint _payload;
2335
2336 static constexpr int offset_bit_width = 24;
2337 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2338 public:
2339 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2340 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2341 }
2342
2343 BasicType bt() const {
2344 return static_cast<BasicType>(_payload >> offset_bit_width);
2345 }
2346
2347 int offset() const {
2348 juint res = _payload & offset_bit_mask;
2349 return res == offset_bit_mask ? -1 : res;
2350 }
2351
2352 juint hash() const {
2353 return _payload;
2354 }
2355
2356 bool operator!=(const Element& other) const {
2357 return _payload != other._payload;
2358 }
2359 };
2360
2361 private:
2362 const bool _has_ro_adapter;
2363 const int _length;
2364
2365 static int data_offset() { return sizeof(AdapterFingerPrint); }
2366 Element* data_pointer() {
2367 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2368 }
2369
2370 const Element& element_at(int index) {
2371 assert(index < length(), "index %d out of bounds for length %d", index, length());
2372 Element* data = data_pointer();
2373 return data[index];
2374 }
2375
2376 // Private construtor. Use allocate() to get an instance.
2377 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2378 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2379 Element* data = data_pointer();
2380 BasicType prev_bt = T_ILLEGAL;
2381 int vt_count = 0;
2382 for (int index = 0; index < _length; index++) {
2383 const SigEntry& sig_entry = sig->at(index);
2384 BasicType bt = sig_entry._bt;
2385 if (bt == T_METADATA) {
2386 // Found start of inline type in signature
2387 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2388 vt_count++;
2389 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2390 // Found end of inline type in signature
2391 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2392 vt_count--;
2393 assert(vt_count >= 0, "invalid vt_count");
2394 } else if (vt_count == 0) {
2395 // Widen fields that are not part of a scalarized inline type argument
2396 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2397 bt = adapter_encoding(bt);
2398 }
2399
2400 ::new(&data[index]) Element(bt, sig_entry._offset);
2401 prev_bt = bt;
2402 }
2403 assert(vt_count == 0, "invalid vt_count");
2404 }
2405
2406 // Call deallocate instead
2407 ~AdapterFingerPrint() {
2408 ShouldNotCallThis();
2409 }
2410
2411 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2412 return (sig != nullptr) ? sig->length() : 0;
2413 }
2414
2415 static int compute_size_in_words(int len) {
2416 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2417 }
2418
2419 // Remap BasicTypes that are handled equivalently by the adapters.
2420 // These are correct for the current system but someday it might be
2421 // necessary to make this mapping platform dependent.
2422 static BasicType adapter_encoding(BasicType in) {
2423 switch (in) {
2424 case T_BOOLEAN:
2425 case T_BYTE:
2426 case T_SHORT:
2427 case T_CHAR:
2428 // They are all promoted to T_INT in the calling convention
2429 return T_INT;
2430
2431 case T_OBJECT:
2432 case T_ARRAY:
2433 // In other words, we assume that any register good enough for
2434 // an int or long is good enough for a managed pointer.
2435 #ifdef _LP64
2436 return T_LONG;
2437 #else
2438 return T_INT;
2439 #endif
2440
2441 case T_INT:
2442 case T_LONG:
2443 case T_FLOAT:
2444 case T_DOUBLE:
2445 case T_VOID:
2446 return in;
2447
2448 default:
2449 ShouldNotReachHere();
2450 return T_CONFLICT;
2451 }
2452 }
2453
2454 void* operator new(size_t size, size_t fp_size) throw() {
2455 assert(fp_size >= size, "sanity check");
2456 void* p = AllocateHeap(fp_size, mtCode);
2457 memset(p, 0, fp_size);
2458 return p;
2459 }
2460
2461 public:
2462 template<typename Function>
2463 void iterate_args(Function function) {
2464 for (int i = 0; i < length(); i++) {
2465 function(element_at(i));
2466 }
2467 }
2468
2469 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2470 int len = total_args_passed_in_sig(sig);
2471 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2472 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2473 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2474 return afp;
2475 }
2476
2477 static void deallocate(AdapterFingerPrint* fp) {
2478 FreeHeap(fp);
2479 }
2480
2481 bool has_ro_adapter() const {
2482 return _has_ro_adapter;
2483 }
2484
2485 int length() const {
2486 return _length;
2487 }
2488
2489 unsigned int compute_hash() {
2490 int hash = 0;
2491 for (int i = 0; i < length(); i++) {
2492 const Element& v = element_at(i);
2493 //Add arithmetic operation to the hash, like +3 to improve hashing
2494 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2495 }
2496 return (unsigned int)hash;
2497 }
2498
2499 const char* as_string() {
2500 stringStream st;
2501 st.print("{");
2502 if (_has_ro_adapter) {
2503 st.print("has_ro_adapter");
2504 } else {
2505 st.print("no_ro_adapter");
2506 }
2507 for (int i = 0; i < length(); i++) {
2508 st.print(", ");
2509 const Element& elem = element_at(i);
2510 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2511 }
2512 st.print("}");
2513 return st.as_string();
2514 }
2515
2516 const char* as_basic_args_string() {
2517 stringStream st;
2518 bool long_prev = false;
2519 iterate_args([&] (const Element& arg) {
2520 if (long_prev) {
2521 long_prev = false;
2522 if (arg.bt() == T_VOID) {
2523 st.print("J");
2524 } else {
2525 st.print("L");
2526 }
2527 }
2528 if (arg.bt() == T_LONG) {
2529 long_prev = true;
2530 } else if (arg.bt() != T_VOID) {
2531 st.print("%c", type2char(arg.bt()));
2532 }
2533 });
2534 if (long_prev) {
2535 st.print("L");
2536 }
2537 return st.as_string();
2538 }
2539
2540 bool equals(AdapterFingerPrint* other) {
2541 if (other->_has_ro_adapter != _has_ro_adapter) {
2542 return false;
2543 } else if (other->_length != _length) {
2544 return false;
2545 } else {
2546 for (int i = 0; i < _length; i++) {
2547 if (element_at(i) != other->element_at(i)) {
2548 return false;
2549 }
2550 }
2551 }
2552 return true;
2553 }
2554
2555 // methods required by virtue of being a MetaspaceObj
2556 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2557 int size() const { return compute_size_in_words(_length); }
2558 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2559
2560 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2561 NOT_PRODUCT(_equals++);
2562 return fp1->equals(fp2);
2563 }
2564
2565 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2566 return fp->compute_hash();
2567 }
2568 };
2569
2570 #if INCLUDE_CDS
2571 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2572 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2573 }
2574
2575 class ArchivedAdapterTable : public OffsetCompactHashtable<
2576 AdapterFingerPrint*,
2577 AdapterHandlerEntry*,
2578 adapter_fp_equals_compact_hashtable_entry> {};
2579 #endif // INCLUDE_CDS
2580
2581 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2582 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2583 AnyObj::C_HEAP, mtCode,
2584 AdapterFingerPrint::compute_hash,
2585 AdapterFingerPrint::equals>;
2586 static AdapterHandlerTable* _adapter_handler_table;
2587 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2588
2589 // Find a entry with the same fingerprint if it exists
2590 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2591 NOT_PRODUCT(_lookups++);
2592 assert_lock_strong(AdapterHandlerLibrary_lock);
2593 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2594 AdapterHandlerEntry* entry = nullptr;
2595 #if INCLUDE_CDS
2596 // if we are building the archive then the archived adapter table is
2597 // not valid and we need to use the ones added to the runtime table
2598 if (AOTCodeCache::is_using_adapter()) {
2599 // Search archived table first. It is read-only table so can be searched without lock
2600 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2601 #ifndef PRODUCT
2602 if (entry != nullptr) {
2603 _archived_hits++;
2604 }
2605 #endif
2606 }
2607 #endif // INCLUDE_CDS
2608 if (entry == nullptr) {
2609 assert_lock_strong(AdapterHandlerLibrary_lock);
2610 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2611 if (entry_p != nullptr) {
2612 entry = *entry_p;
2613 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2614 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2615 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2616 #ifndef PRODUCT
2617 _runtime_hits++;
2618 #endif
2619 }
2620 }
2621 AdapterFingerPrint::deallocate(fp);
2622 return entry;
2623 }
2624
2625 #ifndef PRODUCT
2626 static void print_table_statistics() {
2627 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2628 return sizeof(*key) + sizeof(*a);
2629 };
2630 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2631 ts.print(tty, "AdapterHandlerTable");
2632 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2633 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2634 int total_hits = _archived_hits + _runtime_hits;
2635 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2636 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2637 }
2638 #endif
2639
2640 // ---------------------------------------------------------------------------
2641 // Implementation of AdapterHandlerLibrary
2642 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2643 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2644 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2645 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2646 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2647 #if INCLUDE_CDS
2648 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2649 #endif // INCLUDE_CDS
2650 static const int AdapterHandlerLibrary_size = 48*K;
2651 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2652 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2653
2654 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2655 assert(_buffer != nullptr, "should be initialized");
2656 return _buffer;
2657 }
2658
2659 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2660 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2661 AdapterBlob* adapter_blob = entry->adapter_blob();
2662 char blob_id[256];
2663 jio_snprintf(blob_id,
2664 sizeof(blob_id),
2665 "%s(%s)",
2666 adapter_blob->name(),
2667 entry->fingerprint()->as_string());
2668 if (Forte::is_enabled()) {
2669 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2670 }
2671
2672 if (JvmtiExport::should_post_dynamic_code_generated()) {
2673 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2674 }
2675 }
2676 }
2677
2678 void AdapterHandlerLibrary::initialize() {
2679 {
2680 ResourceMark rm;
2681 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2682 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2683 }
2684
2685 #if INCLUDE_CDS
2686 // Link adapters in AOT Cache to their code in AOT Code Cache
2687 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2688 link_aot_adapters();
2689 lookup_simple_adapters();
2690 return;
2691 }
2692 #endif // INCLUDE_CDS
2693
2694 ResourceMark rm;
2695 {
2696 MutexLocker mu(AdapterHandlerLibrary_lock);
2697
2698 CompiledEntrySignature no_args;
2699 no_args.compute_calling_conventions();
2700 _no_arg_handler = create_adapter(no_args, true);
2701
2702 CompiledEntrySignature obj_args;
2703 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2704 obj_args.compute_calling_conventions();
2705 _obj_arg_handler = create_adapter(obj_args, true);
2706
2707 CompiledEntrySignature int_args;
2708 SigEntry::add_entry(int_args.sig(), T_INT);
2709 int_args.compute_calling_conventions();
2710 _int_arg_handler = create_adapter(int_args, true);
2711
2712 CompiledEntrySignature obj_int_args;
2713 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2714 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2715 obj_int_args.compute_calling_conventions();
2716 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2717
2718 CompiledEntrySignature obj_obj_args;
2719 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2720 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2721 obj_obj_args.compute_calling_conventions();
2722 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2723
2724 // we should always get an entry back but we don't have any
2725 // associated blob on Zero
2726 assert(_no_arg_handler != nullptr &&
2727 _obj_arg_handler != nullptr &&
2728 _int_arg_handler != nullptr &&
2729 _obj_int_arg_handler != nullptr &&
2730 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2731 }
2732
2733 // Outside of the lock
2734 #ifndef ZERO
2735 // no blobs to register when we are on Zero
2736 post_adapter_creation(_no_arg_handler);
2737 post_adapter_creation(_obj_arg_handler);
2738 post_adapter_creation(_int_arg_handler);
2739 post_adapter_creation(_obj_int_arg_handler);
2740 post_adapter_creation(_obj_obj_arg_handler);
2741 #endif // ZERO
2742 }
2743
2744 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2745 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2746 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2747 return AdapterHandlerEntry::allocate(id, fingerprint);
2748 }
2749
2750 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2751 int total_args_passed = method->size_of_parameters(); // All args on stack
2752 if (total_args_passed == 0) {
2753 return _no_arg_handler;
2754 } else if (total_args_passed == 1) {
2755 if (!method->is_static()) {
2756 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2757 return nullptr;
2758 }
2759 return _obj_arg_handler;
2760 }
2761 switch (method->signature()->char_at(1)) {
2762 case JVM_SIGNATURE_CLASS: {
2763 if (InlineTypePassFieldsAsArgs) {
2764 SignatureStream ss(method->signature());
2765 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2766 if (vk != nullptr) {
2767 return nullptr;
2768 }
2769 }
2770 return _obj_arg_handler;
2771 }
2772 case JVM_SIGNATURE_ARRAY:
2773 return _obj_arg_handler;
2774 case JVM_SIGNATURE_INT:
2775 case JVM_SIGNATURE_BOOLEAN:
2776 case JVM_SIGNATURE_CHAR:
2777 case JVM_SIGNATURE_BYTE:
2778 case JVM_SIGNATURE_SHORT:
2779 return _int_arg_handler;
2780 }
2781 } else if (total_args_passed == 2 &&
2782 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2783 switch (method->signature()->char_at(1)) {
2784 case JVM_SIGNATURE_CLASS: {
2785 if (InlineTypePassFieldsAsArgs) {
2786 SignatureStream ss(method->signature());
2787 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2788 if (vk != nullptr) {
2789 return nullptr;
2790 }
2791 }
2792 return _obj_obj_arg_handler;
2793 }
2794 case JVM_SIGNATURE_ARRAY:
2795 return _obj_obj_arg_handler;
2796 case JVM_SIGNATURE_INT:
2797 case JVM_SIGNATURE_BOOLEAN:
2798 case JVM_SIGNATURE_CHAR:
2799 case JVM_SIGNATURE_BYTE:
2800 case JVM_SIGNATURE_SHORT:
2801 return _obj_int_arg_handler;
2802 }
2803 }
2804 return nullptr;
2805 }
2806
2807 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2808 _method(method), _num_inline_args(0), _has_inline_recv(false),
2809 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2810 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2811 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2812 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2813 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2814 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2815 }
2816
2817 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2818 // or the same entry for VEP and VIEP(RO).
2819 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2820 if (!has_scalarized_args()) {
2821 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2822 return CodeOffsets::Verified_Entry;
2823 }
2824 if (_method->is_static()) {
2825 // Static methods don't need VIEP(RO)
2826 return CodeOffsets::Verified_Entry;
2827 }
2828
2829 if (has_inline_recv()) {
2830 if (num_inline_args() == 1) {
2831 // Share same entry for VIEP and VIEP(RO).
2832 // This is quite common: we have an instance method in an InlineKlass that has
2833 // no inline type args other than <this>.
2834 return CodeOffsets::Verified_Inline_Entry;
2835 } else {
2836 assert(num_inline_args() > 1, "must be");
2837 // No sharing:
2838 // VIEP(RO) -- <this> is passed as object
2839 // VEP -- <this> is passed as fields
2840 return CodeOffsets::Verified_Inline_Entry_RO;
2841 }
2842 }
2843
2844 // Either a static method, or <this> is not an inline type
2845 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2846 // No sharing:
2847 // Some arguments are passed on the stack, and we have inserted reserved entries
2848 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2849 return CodeOffsets::Verified_Inline_Entry_RO;
2850 } else {
2851 // Share same entry for VEP and VIEP(RO).
2852 return CodeOffsets::Verified_Entry;
2853 }
2854 }
2855
2856 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2857 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2858 if (_supers != nullptr) {
2859 return _supers;
2860 }
2861 _supers = new GrowableArray<Method*>();
2862 // Skip private, static, and <init> methods
2863 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2864 return _supers;
2865 }
2866 Symbol* name = _method->name();
2867 Symbol* signature = _method->signature();
2868 const Klass* holder = _method->method_holder()->super();
2869 Symbol* holder_name = holder->name();
2870 ThreadInVMfromUnknown tiv;
2871 JavaThread* current = JavaThread::current();
2872 HandleMark hm(current);
2873 Handle loader(current, _method->method_holder()->class_loader());
2874
2875 // Walk up the class hierarchy and search for super methods
2876 while (holder != nullptr) {
2877 Method* super_method = holder->lookup_method(name, signature);
2878 if (super_method == nullptr) {
2879 break;
2880 }
2881 if (!super_method->is_static() && !super_method->is_private() &&
2882 (!super_method->is_package_private() ||
2883 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2884 _supers->push(super_method);
2885 }
2886 holder = super_method->method_holder()->super();
2887 }
2888 // Search interfaces for super methods
2889 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2890 for (int i = 0; i < interfaces->length(); ++i) {
2891 Method* m = interfaces->at(i)->lookup_method(name, signature);
2892 if (m != nullptr && !m->is_static() && m->is_public()) {
2893 _supers->push(m);
2894 }
2895 }
2896 return _supers;
2897 }
2898
2899 // Iterate over arguments and compute scalarized and non-scalarized signatures
2900 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2901 bool has_scalarized = false;
2902 if (_method != nullptr) {
2903 InstanceKlass* holder = _method->method_holder();
2904 int arg_num = 0;
2905 if (!_method->is_static()) {
2906 // We shouldn't scalarize 'this' in a value class constructor
2907 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
2908 !_method->is_object_constructor() && (init || _method->is_scalarized_arg(arg_num))) {
2909 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2910 _sig_cc->insert_before(1, SigEntry(T_OBJECT, 0, nullptr, false, true)); // buffer argument
2911 has_scalarized = true;
2912 _has_inline_recv = true;
2913 _num_inline_args++;
2914 } else {
2915 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2916 }
2917 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2918 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2919 arg_num++;
2920 }
2921 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2922 BasicType bt = ss.type();
2923 if (InlineTypePassFieldsAsArgs && bt == T_OBJECT) {
2924 InlineKlass* vk = ss.as_inline_klass(holder);
2925 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2926 // Check for a calling convention mismatch with super method(s)
2927 bool scalar_super = false;
2928 bool non_scalar_super = false;
2929 GrowableArray<Method*>* supers = get_supers();
2930 for (int i = 0; i < supers->length(); ++i) {
2931 Method* super_method = supers->at(i);
2932 if (super_method->is_scalarized_arg(arg_num)) {
2933 scalar_super = true;
2934 } else {
2935 non_scalar_super = true;
2936 }
2937 }
2938 #ifdef ASSERT
2939 // Randomly enable below code paths for stress testing
2940 bool stress = init && StressCallingConvention;
2941 if (stress && (os::random() & 1) == 1) {
2942 non_scalar_super = true;
2943 if ((os::random() & 1) == 1) {
2944 scalar_super = true;
2945 }
2946 }
2947 #endif
2948 if (non_scalar_super) {
2949 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2950 if (scalar_super) {
2951 // Found non-scalar *and* scalar super methods. We can't handle both.
2952 // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
2953 for (int i = 0; i < supers->length(); ++i) {
2954 Method* super_method = supers->at(i);
2955 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2956 JavaThread* thread = JavaThread::current();
2957 HandleMark hm(thread);
2958 methodHandle mh(thread, super_method);
2959 DeoptimizationScope deopt_scope;
2960 {
2961 // Keep the lock scope minimal. Prevent interference with other
2962 // dependency checks by setting mismatch and marking within the lock.
2963 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2964 super_method->set_mismatch();
2965 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2966 }
2967 deopt_scope.deoptimize_marked();
2968 }
2969 }
2970 }
2971 // Fall back to non-scalarized calling convention
2972 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2973 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2974 } else {
2975 _num_inline_args++;
2976 has_scalarized = true;
2977 int last = _sig_cc->length();
2978 int last_ro = _sig_cc_ro->length();
2979 _sig_cc->appendAll(vk->extended_sig());
2980 _sig_cc_ro->appendAll(vk->extended_sig());
2981 // buffer argument
2982 _sig_cc->insert_before(last + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2983 _sig_cc_ro->insert_before(last_ro + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2984 // Insert InlineTypeNode::NullMarker field right after T_METADATA delimiter
2985 _sig_cc->insert_before(last + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2986 _sig_cc_ro->insert_before(last_ro + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2987 }
2988 } else {
2989 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2990 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2991 }
2992 bt = T_OBJECT;
2993 } else {
2994 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2995 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2996 }
2997 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2998 if (bt != T_VOID) {
2999 arg_num++;
3000 }
3001 }
3002 }
3003
3004 // Compute the non-scalarized calling convention
3005 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3006 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3007
3008 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3009 if (has_scalarized && !_method->is_native()) {
3010 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3011 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3012
3013 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3014 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3015
3016 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3017 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3018
3019 // Upper bound on stack arguments to avoid hitting the argument limit and
3020 // bailing out of compilation ("unsupported incoming calling sequence").
3021 // TODO 8281260 We need a reasonable limit (flag?) here
3022 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 75) {
3023 return; // Success
3024 }
3025 }
3026
3027 // No scalarized args
3028 _sig_cc = _sig;
3029 _regs_cc = _regs;
3030 _args_on_stack_cc = _args_on_stack;
3031
3032 _sig_cc_ro = _sig;
3033 _regs_cc_ro = _regs;
3034 _args_on_stack_cc_ro = _args_on_stack;
3035 }
3036
3037 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3038 _has_inline_recv = fingerprint->has_ro_adapter();
3039
3040 int value_object_count = 0;
3041 BasicType prev_bt = T_ILLEGAL;
3042 bool has_scalarized_arguments = false;
3043 bool long_prev = false;
3044 int long_prev_offset = -1;
3045 bool skipping_inline_recv = false;
3046 bool receiver_handled = false;
3047
3048 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3049 BasicType bt = arg.bt();
3050 int offset = arg.offset();
3051
3052 if (long_prev) {
3053 long_prev = false;
3054 BasicType bt_to_add;
3055 if (bt == T_VOID) {
3056 bt_to_add = T_LONG;
3057 } else {
3058 bt_to_add = T_OBJECT;
3059 }
3060 if (value_object_count == 0) {
3061 SigEntry::add_entry(_sig, bt_to_add);
3062 }
3063 assert(long_prev_offset != 0, "no buffer argument here");
3064 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3065 if (!skipping_inline_recv) {
3066 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3067 }
3068 }
3069
3070 switch (bt) {
3071 case T_VOID:
3072 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3073 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3074 value_object_count--;
3075 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3076 if (!skipping_inline_recv) {
3077 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3078 } else if (value_object_count == 0) {
3079 skipping_inline_recv = false;
3080 }
3081 assert(value_object_count >= 0, "invalid value object count");
3082 } else {
3083 // Nothing to add for _sig: We already added an addition T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3084 }
3085 break;
3086 case T_INT:
3087 case T_FLOAT:
3088 case T_DOUBLE:
3089 if (value_object_count == 0) {
3090 SigEntry::add_entry(_sig, bt);
3091 }
3092 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3093 if (!skipping_inline_recv) {
3094 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3095 }
3096 break;
3097 case T_LONG:
3098 long_prev = true;
3099 long_prev_offset = offset;
3100 break;
3101 case T_BOOLEAN:
3102 case T_CHAR:
3103 case T_BYTE:
3104 case T_SHORT:
3105 case T_OBJECT:
3106 case T_ARRAY:
3107 assert(value_object_count > 0, "must be value object field");
3108 assert(offset != 0 || (bt == T_OBJECT && prev_bt == T_METADATA), "buffer input expected here");
3109 SigEntry::add_entry(_sig_cc, bt, nullptr, offset, offset == -1, offset == 0);
3110 if (!skipping_inline_recv) {
3111 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset, offset == -1, offset == 0);
3112 }
3113 break;
3114 case T_METADATA:
3115 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3116 if (value_object_count == 0) {
3117 SigEntry::add_entry(_sig, T_OBJECT);
3118 }
3119 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3120 if (!skipping_inline_recv) {
3121 if (!receiver_handled && _has_inline_recv && value_object_count == 0) {
3122 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3123 skipping_inline_recv = true;
3124 receiver_handled = true;
3125 } else {
3126 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3127 }
3128 }
3129 value_object_count++;
3130 has_scalarized_arguments = true;
3131 break;
3132 default: {
3133 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3134 }
3135 }
3136 prev_bt = bt;
3137 });
3138
3139 if (long_prev) {
3140 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3141 SigEntry::add_entry(_sig, T_OBJECT);
3142 SigEntry::add_entry(_sig_cc, T_OBJECT);
3143 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3144 }
3145 assert(value_object_count == 0, "invalid value object count");
3146
3147 #ifdef ASSERT
3148 if (_has_inline_recv) {
3149 // In RO signatures, inline receivers must be represented as a single T_OBJECT
3150 assert(_sig_cc_ro->length() >= 1, "sig_cc_ro must include receiver");
3151 assert(_sig_cc_ro->at(0)._bt == T_OBJECT,
3152 "sig_cc_ro must represent inline receiver as T_OBJECT");
3153 assert(_sig_cc_ro->length() <= _sig_cc->length(),
3154 "sig_cc_ro must not be longer than sig_cc");
3155 }
3156 #endif
3157
3158 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3159 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3160
3161 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3162 if (has_scalarized_arguments) {
3163 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3164 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3165
3166 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3167 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3168
3169 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3170 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3171 } else {
3172 // No scalarized args
3173 _sig_cc = _sig;
3174 _regs_cc = _regs;
3175 _args_on_stack_cc = _args_on_stack;
3176
3177 _sig_cc_ro = _sig;
3178 _regs_cc_ro = _regs;
3179 _args_on_stack_cc_ro = _args_on_stack;
3180 }
3181
3182 #ifdef ASSERT
3183 {
3184 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3185 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3186 AdapterFingerPrint::deallocate(compare_fp);
3187 }
3188 #endif
3189 }
3190
3191 const char* AdapterHandlerEntry::_entry_names[] = {
3192 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3193 };
3194
3195 #ifdef ASSERT
3196 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3197 // we can only check for the same code if there is any
3198 #ifndef ZERO
3199 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3200 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3201 assert(comparison_entry->compare_code(cached_entry), "code must match");
3202 // Release the one just created
3203 AdapterHandlerEntry::deallocate(comparison_entry);
3204 # endif // ZERO
3205 }
3206 #endif /* ASSERT*/
3207
3208 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3209 assert(!method->is_abstract() || InlineTypePassFieldsAsArgs, "abstract methods do not have adapters");
3210 // Use customized signature handler. Need to lock around updates to
3211 // the _adapter_handler_table (it is not safe for concurrent readers
3212 // and a single writer: this could be fixed if it becomes a
3213 // problem).
3214
3215 // Fast-path for trivial adapters
3216 AdapterHandlerEntry* entry = get_simple_adapter(method);
3217 if (entry != nullptr) {
3218 return entry;
3219 }
3220
3221 ResourceMark rm;
3222 bool new_entry = false;
3223
3224 CompiledEntrySignature ces(method());
3225 ces.compute_calling_conventions();
3226 if (ces.has_scalarized_args()) {
3227 if (!method->has_scalarized_args()) {
3228 method->set_has_scalarized_args();
3229 }
3230 if (ces.c1_needs_stack_repair()) {
3231 method->set_c1_needs_stack_repair();
3232 }
3233 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3234 method->set_c2_needs_stack_repair();
3235 }
3236 }
3237
3238 {
3239 MutexLocker mu(AdapterHandlerLibrary_lock);
3240
3241 // Lookup method signature's fingerprint
3242 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3243
3244 if (entry != nullptr) {
3245 #ifndef ZERO
3246 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3247 #endif
3248 #ifdef ASSERT
3249 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3250 verify_adapter_sharing(ces, entry);
3251 }
3252 #endif
3253 } else {
3254 entry = create_adapter(ces, /* allocate_code_blob */ true);
3255 if (entry != nullptr) {
3256 new_entry = true;
3257 }
3258 }
3259 }
3260
3261 // Outside of the lock
3262 if (new_entry) {
3263 post_adapter_creation(entry);
3264 }
3265 return entry;
3266 }
3267
3268 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3269 ResourceMark rm;
3270 const char* name = AdapterHandlerLibrary::name(handler);
3271 const uint32_t id = AdapterHandlerLibrary::id(handler);
3272
3273 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3274 if (blob != nullptr) {
3275 handler->set_adapter_blob(blob->as_adapter_blob());
3276 }
3277 }
3278
3279 #ifndef PRODUCT
3280 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
3281 ttyLocker ttyl;
3282 ResourceMark rm;
3283 int insts_size;
3284 // on Zero the blob may be null
3285 handler->print_adapter_on(tty);
3286 AdapterBlob* adapter_blob = handler->adapter_blob();
3287 if (adapter_blob == nullptr) {
3288 return;
3289 }
3290 insts_size = adapter_blob->code_size();
3291 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3292 handler->fingerprint()->as_basic_args_string(),
3293 handler->fingerprint()->as_string(), insts_size);
3294 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3295 if (Verbose || PrintStubCode) {
3296 address first_pc = adapter_blob->content_begin();
3297 if (first_pc != nullptr) {
3298 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3299 st->cr();
3300 }
3301 }
3302 }
3303 #endif // PRODUCT
3304
3305 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3306 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3307 entry_offset[AdapterBlob::I2C] = 0;
3308 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3309 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3310 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3311 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3312 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3313 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3314 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3315 } else {
3316 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3317 }
3318 }
3319
3320 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3321 CompiledEntrySignature& ces,
3322 bool allocate_code_blob,
3323 bool is_transient) {
3324 if (log_is_enabled(Info, perf, class, link)) {
3325 ClassLoader::perf_method_adapters_count()->inc();
3326 }
3327
3328 #ifndef ZERO
3329 AdapterBlob* adapter_blob = nullptr;
3330 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3331 CodeBuffer buffer(buf);
3332 short buffer_locs[20];
3333 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3334 sizeof(buffer_locs)/sizeof(relocInfo));
3335 MacroAssembler masm(&buffer);
3336 address entry_address[AdapterBlob::ENTRY_COUNT];
3337
3338 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
3339 SharedRuntime::generate_i2c2i_adapters(&masm,
3340 ces.args_on_stack(),
3341 ces.sig(),
3342 ces.regs(),
3343 ces.sig_cc(),
3344 ces.regs_cc(),
3345 ces.sig_cc_ro(),
3346 ces.regs_cc_ro(),
3347 entry_address,
3348 adapter_blob,
3349 allocate_code_blob);
3350
3351 if (ces.has_scalarized_args()) {
3352 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3353 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3354 heap_sig->appendAll(ces.sig_cc());
3355 handler->set_sig_cc(heap_sig);
3356 heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3357 heap_sig->appendAll(ces.sig_cc_ro());
3358 handler->set_sig_cc_ro(heap_sig);
3359 }
3360 // On zero there is no code to save and no need to create a blob and
3361 // or relocate the handler.
3362 int entry_offset[AdapterBlob::ENTRY_COUNT];
3363 address_to_offset(entry_address, entry_offset);
3364 #ifdef ASSERT
3365 if (VerifyAdapterSharing) {
3366 handler->save_code(buf->code_begin(), buffer.insts_size());
3367 if (is_transient) {
3368 return true;
3369 }
3370 }
3371 #endif
3372 if (adapter_blob == nullptr) {
3373 // CodeCache is full, disable compilation
3374 // Ought to log this but compile log is only per compile thread
3375 // and we're some non descript Java thread.
3376 return false;
3377 }
3378 handler->set_adapter_blob(adapter_blob);
3379 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3380 // try to save generated code
3381 const char* name = AdapterHandlerLibrary::name(handler);
3382 const uint32_t id = AdapterHandlerLibrary::id(handler);
3383 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3384 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3385 }
3386 #endif // ZERO
3387
3388 #ifndef PRODUCT
3389 // debugging support
3390 if (PrintAdapterHandlers || PrintStubCode) {
3391 print_adapter_handler_info(tty, handler);
3392 }
3393 #endif
3394
3395 return true;
3396 }
3397
3398 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3399 bool allocate_code_blob,
3400 bool is_transient) {
3401 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3402 #ifdef ASSERT
3403 // Verify that we can successfully restore the compiled entry signature object.
3404 CompiledEntrySignature ces_verify;
3405 ces_verify.initialize_from_fingerprint(fp);
3406 #endif
3407 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3408 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3409 AdapterHandlerEntry::deallocate(handler);
3410 return nullptr;
3411 }
3412 if (!is_transient) {
3413 assert_lock_strong(AdapterHandlerLibrary_lock);
3414 _adapter_handler_table->put(fp, handler);
3415 }
3416 return handler;
3417 }
3418
3419 #if INCLUDE_CDS
3420 void AdapterHandlerEntry::remove_unshareable_info() {
3421 #ifdef ASSERT
3422 _saved_code = nullptr;
3423 _saved_code_length = 0;
3424 #endif // ASSERT
3425 _adapter_blob = nullptr;
3426 _linked = false;
3427 _sig_cc = nullptr;
3428 _sig_cc_ro = nullptr;
3429 }
3430
3431 class CopyAdapterTableToArchive : StackObj {
3432 private:
3433 CompactHashtableWriter* _writer;
3434 ArchiveBuilder* _builder;
3435 public:
3436 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
3437 _builder(ArchiveBuilder::current())
3438 {}
3439
3440 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
3441 LogStreamHandle(Trace, aot) lsh;
3442 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3443 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3444 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3445 assert(buffered_fp != nullptr,"sanity check");
3446 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3447 assert(buffered_entry != nullptr,"sanity check");
3448
3449 uint hash = fp->compute_hash();
3450 _writer->add(hash, AOTCompressedPointers::encode_not_null(buffered_entry));
3451 if (lsh.is_enabled()) {
3452 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
3453 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
3454 log_trace(aot)("Added fp=%p (%s), entry=%p to the archived adater table", buffered_fp, buffered_fp->as_basic_args_string(), buffered_entry);
3455 }
3456 } else {
3457 if (lsh.is_enabled()) {
3458 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
3459 }
3460 }
3461 return true;
3462 }
3463 };
3464
3465 void AdapterHandlerLibrary::dump_aot_adapter_table() {
3466 CompactHashtableStats stats;
3467 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
3468 CopyAdapterTableToArchive copy(&writer);
3469 _adapter_handler_table->iterate(©);
3470 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
3471 }
3472
3473 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
3474 _aot_adapter_handler_table.serialize_header(soc);
3475 }
3476
3477 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
3478 #ifdef ASSERT
3479 if (TestAOTAdapterLinkFailure) {
3480 return;
3481 }
3482 #endif
3483 lookup_aot_cache(handler);
3484 #ifndef PRODUCT
3485 // debugging support
3486 if (PrintAdapterHandlers || PrintStubCode) {
3487 print_adapter_handler_info(tty, handler);
3488 }
3489 #endif
3490 }
3491
3492 // This method is used during production run to link archived adapters (stored in AOT Cache)
3493 // to their code in AOT Code Cache
3494 void AdapterHandlerEntry::link() {
3495 ResourceMark rm;
3496 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3497 bool generate_code = false;
3498 // Generate code only if AOTCodeCache is not available, or
3499 // caching adapters is disabled, or we fail to link
3500 // the AdapterHandlerEntry to its code in the AOTCodeCache
3501 if (AOTCodeCache::is_using_adapter()) {
3502 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3503 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3504 if (_adapter_blob == nullptr) {
3505 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3506 generate_code = true;
3507 }
3508
3509 if (get_sig_cc() == nullptr) {
3510 // Calling conventions have to be regenerated at runtime and are accessed through method adapters,
3511 // which are archived in the AOT code cache. If the adapters are not regenerated, the
3512 // calling conventions should be regenerated here.
3513 CompiledEntrySignature ces;
3514 ces.initialize_from_fingerprint(_fingerprint);
3515 if (ces.has_scalarized_args()) {
3516 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3517 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3518 heap_sig->appendAll(ces.sig_cc());
3519 set_sig_cc(heap_sig);
3520 heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3521 heap_sig->appendAll(ces.sig_cc_ro());
3522 set_sig_cc_ro(heap_sig);
3523 }
3524 }
3525 } else {
3526 generate_code = true;
3527 }
3528 if (generate_code) {
3529 CompiledEntrySignature ces;
3530 ces.initialize_from_fingerprint(_fingerprint);
3531 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3532 // Don't throw exceptions during VM initialization because java.lang.* classes
3533 // might not have been initialized, causing problems when constructing the
3534 // Java exception object.
3535 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3536 }
3537 }
3538 if (_adapter_blob != nullptr) {
3539 post_adapter_creation(this);
3540 }
3541 assert(_linked, "AdapterHandlerEntry must now be linked");
3542 }
3543
3544 void AdapterHandlerLibrary::link_aot_adapters() {
3545 uint max_id = 0;
3546 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3547 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3548 * That implies adapter ids of the adapters in the cache may not be contiguous.
3549 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3550 * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3551 * To avoid such situation, initialize the _id_counter with the largest adapter id among the AOT stored handlers.
3552 */
3553 _aot_adapter_handler_table.iterate_all([&](AdapterHandlerEntry* entry) {
3554 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3555 entry->link();
3556 max_id = MAX2(max_id, entry->id());
3557 });
3558 // Set adapter id to the maximum id found in the AOTCache
3559 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3560 _id_counter = max_id;
3561 }
3562
3563 // This method is called during production run to lookup simple adapters
3564 // in the archived adapter handler table
3565 void AdapterHandlerLibrary::lookup_simple_adapters() {
3566 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3567
3568 MutexLocker mu(AdapterHandlerLibrary_lock);
3569 ResourceMark rm;
3570 CompiledEntrySignature no_args;
3571 no_args.compute_calling_conventions();
3572 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3573
3574 CompiledEntrySignature obj_args;
3575 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3576 obj_args.compute_calling_conventions();
3577 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3578
3579 CompiledEntrySignature int_args;
3580 SigEntry::add_entry(int_args.sig(), T_INT);
3581 int_args.compute_calling_conventions();
3582 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3583
3584 CompiledEntrySignature obj_int_args;
3585 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3586 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3587 obj_int_args.compute_calling_conventions();
3588 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3589
3590 CompiledEntrySignature obj_obj_args;
3591 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3592 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3593 obj_obj_args.compute_calling_conventions();
3594 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3595
3596 assert(_no_arg_handler != nullptr &&
3597 _obj_arg_handler != nullptr &&
3598 _int_arg_handler != nullptr &&
3599 _obj_int_arg_handler != nullptr &&
3600 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3601 assert(_no_arg_handler->is_linked() &&
3602 _obj_arg_handler->is_linked() &&
3603 _int_arg_handler->is_linked() &&
3604 _obj_int_arg_handler->is_linked() &&
3605 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3606 }
3607 #endif // INCLUDE_CDS
3608
3609 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3610 LogStreamHandle(Trace, aot) lsh;
3611 if (lsh.is_enabled()) {
3612 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3613 lsh.cr();
3614 }
3615 it->push(&_fingerprint);
3616 }
3617
3618 AdapterHandlerEntry::~AdapterHandlerEntry() {
3619 if (_fingerprint != nullptr) {
3620 AdapterFingerPrint::deallocate(_fingerprint);
3621 _fingerprint = nullptr;
3622 }
3623 if (_sig_cc != nullptr) {
3624 delete _sig_cc;
3625 }
3626 if (_sig_cc_ro != nullptr) {
3627 delete _sig_cc_ro;
3628 }
3629 #ifdef ASSERT
3630 FREE_C_HEAP_ARRAY(_saved_code);
3631 #endif
3632 FreeHeap(this);
3633 }
3634
3635
3636 #ifdef ASSERT
3637 // Capture the code before relocation so that it can be compared
3638 // against other versions. If the code is captured after relocation
3639 // then relative instructions won't be equivalent.
3640 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3641 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3642 _saved_code_length = length;
3643 memcpy(_saved_code, buffer, length);
3644 }
3645
3646
3647 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3648 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3649
3650 if (other->_saved_code_length != _saved_code_length) {
3651 return false;
3652 }
3653
3654 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3655 }
3656 #endif
3657
3658
3659 /**
3660 * Create a native wrapper for this native method. The wrapper converts the
3661 * Java-compiled calling convention to the native convention, handles
3662 * arguments, and transitions to native. On return from the native we transition
3663 * back to java blocking if a safepoint is in progress.
3664 */
3665 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3666 ResourceMark rm;
3667 nmethod* nm = nullptr;
3668
3669 // Check if memory should be freed before allocation
3670 CodeCache::gc_on_allocation();
3671
3672 assert(method->is_native(), "must be native");
3673 assert(method->is_special_native_intrinsic() ||
3674 method->has_native_function(), "must have something valid to call!");
3675
3676 {
3677 // Perform the work while holding the lock, but perform any printing outside the lock
3678 MutexLocker mu(AdapterHandlerLibrary_lock);
3679 // See if somebody beat us to it
3680 if (method->code() != nullptr) {
3681 return;
3682 }
3683
3684 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3685 assert(compile_id > 0, "Must generate native wrapper");
3686
3687
3688 ResourceMark rm;
3689 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3690 if (buf != nullptr) {
3691 CodeBuffer buffer(buf);
3692
3693 if (method->is_continuation_enter_intrinsic()) {
3694 buffer.initialize_stubs_size(192);
3695 }
3696
3697 struct { double data[20]; } locs_buf;
3698 struct { double data[20]; } stubs_locs_buf;
3699 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3700 #if defined(AARCH64) || defined(PPC64)
3701 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3702 // in the constant pool to ensure ordering between the barrier and oops
3703 // accesses. For native_wrappers we need a constant.
3704 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3705 // static java call that is resolved in the runtime.
3706 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3707 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3708 }
3709 #endif
3710 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3711 MacroAssembler _masm(&buffer);
3712
3713 // Fill in the signature array, for the calling-convention call.
3714 const int total_args_passed = method->size_of_parameters();
3715
3716 BasicType stack_sig_bt[16];
3717 VMRegPair stack_regs[16];
3718 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3719 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3720
3721 int i = 0;
3722 if (!method->is_static()) { // Pass in receiver first
3723 sig_bt[i++] = T_OBJECT;
3724 }
3725 SignatureStream ss(method->signature());
3726 for (; !ss.at_return_type(); ss.next()) {
3727 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3728 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3729 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3730 }
3731 }
3732 assert(i == total_args_passed, "");
3733 BasicType ret_type = ss.type();
3734
3735 // Now get the compiled-Java arguments layout.
3736 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3737
3738 // Generate the compiled-to-native wrapper code
3739 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3740
3741 if (nm != nullptr) {
3742 {
3743 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3744 if (nm->make_in_use()) {
3745 method->set_code(method, nm);
3746 }
3747 }
3748
3749 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3750 if (directive->PrintAssemblyOption) {
3751 nm->print_code();
3752 }
3753 DirectivesStack::release(directive);
3754 }
3755 }
3756 } // Unlock AdapterHandlerLibrary_lock
3757
3758
3759 // Install the generated code.
3760 if (nm != nullptr) {
3761 const char *msg = method->is_static() ? "(static)" : "";
3762 CompileTask::print_ul(nm, msg);
3763 if (PrintCompilation) {
3764 ttyLocker ttyl;
3765 CompileTask::print(tty, nm, msg);
3766 }
3767 nm->post_compiled_method_load_event();
3768 }
3769 }
3770
3771 // -------------------------------------------------------------------------
3772 // Java-Java calling convention
3773 // (what you use when Java calls Java)
3774
3775 //------------------------------name_for_receiver----------------------------------
3776 // For a given signature, return the VMReg for parameter 0.
3777 VMReg SharedRuntime::name_for_receiver() {
3778 VMRegPair regs;
3779 BasicType sig_bt = T_OBJECT;
3780 (void) java_calling_convention(&sig_bt, ®s, 1);
3781 // Return argument 0 register. In the LP64 build pointers
3782 // take 2 registers, but the VM wants only the 'main' name.
3783 return regs.first();
3784 }
3785
3786 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3787 // This method is returning a data structure allocating as a
3788 // ResourceObject, so do not put any ResourceMarks in here.
3789
3790 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3791 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3792 int cnt = 0;
3793 if (has_receiver) {
3794 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3795 }
3796
3797 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3798 BasicType type = ss.type();
3799 sig_bt[cnt++] = type;
3800 if (is_double_word_type(type))
3801 sig_bt[cnt++] = T_VOID;
3802 }
3803
3804 if (has_appendix) {
3805 sig_bt[cnt++] = T_OBJECT;
3806 }
3807
3808 assert(cnt < 256, "grow table size");
3809
3810 int comp_args_on_stack;
3811 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3812
3813 // the calling convention doesn't count out_preserve_stack_slots so
3814 // we must add that in to get "true" stack offsets.
3815
3816 if (comp_args_on_stack) {
3817 for (int i = 0; i < cnt; i++) {
3818 VMReg reg1 = regs[i].first();
3819 if (reg1->is_stack()) {
3820 // Yuck
3821 reg1 = reg1->bias(out_preserve_stack_slots());
3822 }
3823 VMReg reg2 = regs[i].second();
3824 if (reg2->is_stack()) {
3825 // Yuck
3826 reg2 = reg2->bias(out_preserve_stack_slots());
3827 }
3828 regs[i].set_pair(reg2, reg1);
3829 }
3830 }
3831
3832 // results
3833 *arg_size = cnt;
3834 return regs;
3835 }
3836
3837 // OSR Migration Code
3838 //
3839 // This code is used convert interpreter frames into compiled frames. It is
3840 // called from very start of a compiled OSR nmethod. A temp array is
3841 // allocated to hold the interesting bits of the interpreter frame. All
3842 // active locks are inflated to allow them to move. The displaced headers and
3843 // active interpreter locals are copied into the temp buffer. Then we return
3844 // back to the compiled code. The compiled code then pops the current
3845 // interpreter frame off the stack and pushes a new compiled frame. Then it
3846 // copies the interpreter locals and displaced headers where it wants.
3847 // Finally it calls back to free the temp buffer.
3848 //
3849 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3850
3851 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3852 assert(current == JavaThread::current(), "pre-condition");
3853 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3854 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3855 // frame. The stack watermark code below ensures that the interpreted frame is processed
3856 // before it gets unwound. This is helpful as the size of the compiled frame could be
3857 // larger than the interpreted frame, which could result in the new frame not being
3858 // processed correctly.
3859 StackWatermarkSet::before_unwind(current);
3860
3861 //
3862 // This code is dependent on the memory layout of the interpreter local
3863 // array and the monitors. On all of our platforms the layout is identical
3864 // so this code is shared. If some platform lays the their arrays out
3865 // differently then this code could move to platform specific code or
3866 // the code here could be modified to copy items one at a time using
3867 // frame accessor methods and be platform independent.
3868
3869 frame fr = current->last_frame();
3870 assert(fr.is_interpreted_frame(), "");
3871 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3872
3873 // Figure out how many monitors are active.
3874 int active_monitor_count = 0;
3875 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3876 kptr < fr.interpreter_frame_monitor_begin();
3877 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3878 if (kptr->obj() != nullptr) active_monitor_count++;
3879 }
3880
3881 // QQQ we could place number of active monitors in the array so that compiled code
3882 // could double check it.
3883
3884 Method* moop = fr.interpreter_frame_method();
3885 int max_locals = moop->max_locals();
3886 // Allocate temp buffer, 1 word per local & 2 per active monitor
3887 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3888 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3889
3890 // Copy the locals. Order is preserved so that loading of longs works.
3891 // Since there's no GC I can copy the oops blindly.
3892 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3893 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3894 (HeapWord*)&buf[0],
3895 max_locals);
3896
3897 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3898 int i = max_locals;
3899 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3900 kptr2 < fr.interpreter_frame_monitor_begin();
3901 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3902 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3903 BasicLock *lock = kptr2->lock();
3904 if (UseObjectMonitorTable) {
3905 buf[i] = (intptr_t)lock->object_monitor_cache();
3906 }
3907 #ifdef ASSERT
3908 else {
3909 buf[i] = badDispHeaderOSR;
3910 }
3911 #endif
3912 i++;
3913 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3914 }
3915 }
3916 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3917
3918 RegisterMap map(current,
3919 RegisterMap::UpdateMap::skip,
3920 RegisterMap::ProcessFrames::include,
3921 RegisterMap::WalkContinuation::skip);
3922 frame sender = fr.sender(&map);
3923 if (sender.is_interpreted_frame()) {
3924 current->push_cont_fastpath(sender.unextended_sp());
3925 }
3926
3927 return buf;
3928 JRT_END
3929
3930 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3931 FREE_C_HEAP_ARRAY(buf);
3932 JRT_END
3933
3934 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3935 return handler->fingerprint()->as_basic_args_string();
3936 }
3937
3938 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3939 return handler->id();
3940 }
3941
3942 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3943 bool found = false;
3944 #if INCLUDE_CDS
3945 if (AOTCodeCache::is_using_adapter()) {
3946 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3947 if (b == handler->adapter_blob()) {
3948 found = true;
3949 st->print("Adapter for signature: ");
3950 handler->print_adapter_on(st);
3951 return false; // abort iteration
3952 } else {
3953 return true; // keep looking
3954 }
3955 };
3956 _aot_adapter_handler_table.iterate(findblob_archived_table);
3957 }
3958 #endif // INCLUDE_CDS
3959 if (!found) {
3960 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* handler) {
3961 if (b == handler->adapter_blob()) {
3962 found = true;
3963 st->print("Adapter for signature: ");
3964 handler->print_adapter_on(st);
3965 return false; // abort iteration
3966 } else {
3967 return true; // keep looking
3968 }
3969 };
3970 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3971 _adapter_handler_table->iterate(findblob_runtime_table);
3972 }
3973 assert(found, "Should have found handler");
3974 }
3975
3976 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3977 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3978 if (adapter_blob() != nullptr) {
3979 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3980 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3981 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3982 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3983 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3984 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3985 if (get_c2i_no_clinit_check_entry() != nullptr) {
3986 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3987 }
3988 }
3989 st->cr();
3990 }
3991
3992 #ifndef PRODUCT
3993
3994 void AdapterHandlerLibrary::print_statistics() {
3995 print_table_statistics();
3996 }
3997
3998 #endif /* PRODUCT */
3999
4000 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
4001 assert(current == JavaThread::current(), "pre-condition");
4002 StackOverflow* overflow_state = current->stack_overflow_state();
4003 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
4004 overflow_state->set_reserved_stack_activation(current->stack_base());
4005 JRT_END
4006
4007 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
4008 ResourceMark rm(current);
4009 frame activation;
4010 nmethod* nm = nullptr;
4011 int count = 1;
4012
4013 assert(fr.is_java_frame(), "Must start on Java frame");
4014
4015 RegisterMap map(JavaThread::current(),
4016 RegisterMap::UpdateMap::skip,
4017 RegisterMap::ProcessFrames::skip,
4018 RegisterMap::WalkContinuation::skip); // don't walk continuations
4019 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
4020 if (!fr.is_java_frame()) {
4021 continue;
4022 }
4023
4024 Method* method = nullptr;
4025 bool found = false;
4026 if (fr.is_interpreted_frame()) {
4027 method = fr.interpreter_frame_method();
4028 if (method != nullptr && method->has_reserved_stack_access()) {
4029 found = true;
4030 }
4031 } else {
4032 CodeBlob* cb = fr.cb();
4033 if (cb != nullptr && cb->is_nmethod()) {
4034 nm = cb->as_nmethod();
4035 method = nm->method();
4036 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
4037 method = sd->method();
4038 if (method != nullptr && method->has_reserved_stack_access()) {
4039 found = true;
4040 }
4041 }
4042 }
4043 }
4044 if (found) {
4045 activation = fr;
4046 warning("Potentially dangerous stack overflow in "
4047 "ReservedStackAccess annotated method %s [%d]",
4048 method->name_and_sig_as_C_string(), count++);
4049 EventReservedStackActivation event;
4050 if (event.should_commit()) {
4051 event.set_method(method);
4052 event.commit();
4053 }
4054 }
4055 }
4056 return activation;
4057 }
4058
4059 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4060 // After any safepoint, just before going back to compiled code,
4061 // we inform the GC that we will be doing initializing writes to
4062 // this object in the future without emitting card-marks, so
4063 // GC may take any compensating steps.
4064
4065 oop new_obj = current->vm_result_oop();
4066 if (new_obj == nullptr) return;
4067
4068 BarrierSet *bs = BarrierSet::barrier_set();
4069 bs->on_slowpath_allocation_exit(current, new_obj);
4070 }
4071
4072 // We are at a compiled code to interpreter call. We need backing
4073 // buffers for all inline type arguments. Allocate an object array to
4074 // hold them (convenient because once we're done with it we don't have
4075 // to worry about freeing it).
4076 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, bool from_c1, TRAPS) {
4077 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4078 ResourceMark rm;
4079
4080 // Retrieve arguments passed at the call
4081 RegisterMap reg_map2(THREAD,
4082 RegisterMap::UpdateMap::include,
4083 RegisterMap::ProcessFrames::include,
4084 RegisterMap::WalkContinuation::skip);
4085 frame stubFrame = THREAD->last_frame();
4086 frame callerFrame = stubFrame.sender(®_map2);
4087 if (from_c1) {
4088 callerFrame = callerFrame.sender(®_map2);
4089 }
4090 int arg_size;
4091 const GrowableArray<SigEntry>* sig = allocate_receiver ? callee->adapter()->get_sig_cc() : callee->adapter()->get_sig_cc_ro();
4092 assert(sig != nullptr, "sig should never be null");
4093 TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
4094 VMRegPair* reg_pairs = find_callee_arguments(tmp_sig, false, false, &arg_size);
4095
4096 int nb_slots = 0;
4097 InstanceKlass* holder = callee->method_holder();
4098 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4099 if (allocate_receiver) {
4100 nb_slots++;
4101 }
4102 int arg_num = callee->is_static() ? 0 : 1;
4103 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4104 BasicType bt = ss.type();
4105 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4106 nb_slots++;
4107 }
4108 if (bt != T_VOID) {
4109 arg_num++;
4110 }
4111 }
4112 objArrayOop array_oop = nullptr;
4113 objArrayHandle array;
4114 arg_num = callee->is_static() ? 0 : 1;
4115 int i = 0;
4116 uint pos = 0;
4117 uint depth = 0;
4118 uint ignored = 0;
4119 if (allocate_receiver) {
4120 assert(sig->at(pos)._bt == T_METADATA, "scalarized value expected");
4121 pos++;
4122 ignored++;
4123 depth++;
4124 assert(sig->at(pos)._bt == T_OBJECT, "buffer argument");
4125 uint reg_pos = 0;
4126 assert(reg_pos < (uint)arg_size, "");
4127 VMRegPair reg_pair = reg_pairs[reg_pos];
4128 oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), ®_map2);
4129 instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
4130 InlineKlass* vk = InlineKlass::cast(holder);
4131 if (h_buffer.not_null()) {
4132 assert(h_buffer->klass() == vk, "buffer not of expected class");
4133 } else {
4134 // Only allocate if buffer passed at the call is null
4135 if (array_oop == nullptr) {
4136 array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4137 array = objArrayHandle(THREAD, array_oop);
4138 }
4139 oop res = vk->allocate_instance(CHECK_NULL);
4140 array->obj_at_put(i, res);
4141 }
4142 i++;
4143 }
4144 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4145 BasicType bt = ss.type();
4146 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4147 while (true) {
4148 BasicType bt = sig->at(pos)._bt;
4149 if (bt == T_METADATA) {
4150 depth++;
4151 ignored++;
4152 if (depth == 1) {
4153 break;
4154 }
4155 } else if (bt == T_VOID && sig->at(pos - 1)._bt != T_LONG && sig->at(pos - 1)._bt != T_DOUBLE) {
4156 ignored++;
4157 depth--;
4158 }
4159 pos++;
4160 }
4161 pos++;
4162 assert(sig->at(pos)._bt == T_OBJECT, "buffer argument expected");
4163 uint reg_pos = pos - ignored;
4164 assert(reg_pos < (uint)arg_size, "out of bound register?");
4165 VMRegPair reg_pair = reg_pairs[reg_pos];
4166 oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), ®_map2);
4167 instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
4168 InlineKlass* vk = ss.as_inline_klass(holder);
4169 assert(vk != nullptr, "Unexpected klass");
4170 if (h_buffer.not_null()) {
4171 assert(h_buffer->klass() == vk, "buffer not of expected class");
4172 } else {
4173 // Only allocate if buffer passed at the call is null
4174 if (array_oop == nullptr) {
4175 array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4176 array = objArrayHandle(THREAD, array_oop);
4177 }
4178 oop res = vk->allocate_instance(CHECK_NULL);
4179 array->obj_at_put(i, res);
4180 }
4181 i++;
4182 }
4183 if (bt != T_VOID) {
4184 arg_num++;
4185 }
4186 }
4187 return array();
4188 }
4189
4190 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4191 methodHandle callee(current, callee_method);
4192 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, false, CHECK);
4193 current->set_vm_result_oop(array);
4194 JRT_END
4195
4196 // We're returning from an interpreted method: load each field into a
4197 // register following the calling convention
4198 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4199 {
4200 assert(res->klass()->is_inline_klass(), "only inline types here");
4201 ResourceMark rm;
4202 RegisterMap reg_map(current,
4203 RegisterMap::UpdateMap::include,
4204 RegisterMap::ProcessFrames::include,
4205 RegisterMap::WalkContinuation::skip);
4206 frame stubFrame = current->last_frame();
4207 frame callerFrame = stubFrame.sender(®_map);
4208 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4209
4210 InlineKlass* vk = InlineKlass::cast(res->klass());
4211
4212 const Array<SigEntry>* sig_vk = vk->extended_sig();
4213 const Array<VMRegPair>* regs = vk->return_regs();
4214
4215 if (regs == nullptr) {
4216 // The fields of the inline klass don't fit in registers, bail out
4217 return;
4218 }
4219
4220 int j = 1;
4221 for (int i = 0; i < sig_vk->length(); i++) {
4222 BasicType bt = sig_vk->at(i)._bt;
4223 if (bt == T_METADATA) {
4224 continue;
4225 }
4226 if (bt == T_VOID) {
4227 if (sig_vk->at(i-1)._bt == T_LONG ||
4228 sig_vk->at(i-1)._bt == T_DOUBLE) {
4229 j++;
4230 }
4231 continue;
4232 }
4233 int off = sig_vk->at(i)._offset;
4234 assert(off > 0, "offset in object should be positive");
4235 VMRegPair pair = regs->at(j);
4236 address loc = reg_map.location(pair.first(), nullptr);
4237 guarantee(loc != nullptr, "bad register save location");
4238 switch(bt) {
4239 case T_BOOLEAN:
4240 *(jboolean*)loc = res->bool_field(off);
4241 break;
4242 case T_CHAR:
4243 *(jchar*)loc = res->char_field(off);
4244 break;
4245 case T_BYTE:
4246 *(jbyte*)loc = res->byte_field(off);
4247 break;
4248 case T_SHORT:
4249 *(jshort*)loc = res->short_field(off);
4250 break;
4251 case T_INT: {
4252 *(jint*)loc = res->int_field(off);
4253 break;
4254 }
4255 case T_LONG:
4256 #ifdef _LP64
4257 *(intptr_t*)loc = res->long_field(off);
4258 #else
4259 Unimplemented();
4260 #endif
4261 break;
4262 case T_OBJECT:
4263 case T_ARRAY: {
4264 *(oop*)loc = res->obj_field(off);
4265 break;
4266 }
4267 case T_FLOAT:
4268 *(jfloat*)loc = res->float_field(off);
4269 break;
4270 case T_DOUBLE:
4271 *(jdouble*)loc = res->double_field(off);
4272 break;
4273 default:
4274 ShouldNotReachHere();
4275 }
4276 j++;
4277 }
4278 assert(j == regs->length(), "missed a field?");
4279
4280 #ifdef ASSERT
4281 VMRegPair pair = regs->at(0);
4282 address loc = reg_map.location(pair.first(), nullptr);
4283 assert(*(oopDesc**)loc == res, "overwritten object");
4284 #endif
4285
4286 current->set_vm_result_oop(res);
4287 }
4288 JRT_END
4289
4290 // We've returned to an interpreted method, the interpreter needs a
4291 // reference to an inline type instance. Allocate it and initialize it
4292 // from field's values in registers.
4293 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4294 {
4295 ResourceMark rm;
4296 RegisterMap reg_map(current,
4297 RegisterMap::UpdateMap::include,
4298 RegisterMap::ProcessFrames::include,
4299 RegisterMap::WalkContinuation::skip);
4300 frame stubFrame = current->last_frame();
4301 frame callerFrame = stubFrame.sender(®_map);
4302
4303 #ifdef ASSERT
4304 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4305 #endif
4306
4307 if (!is_set_nth_bit(res, 0)) {
4308 // We're not returning with inline type fields in registers (the
4309 // calling convention didn't allow it for this inline klass)
4310 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4311 current->set_vm_result_oop((oopDesc*)res);
4312 assert(verif_vk == nullptr, "broken calling convention");
4313 return;
4314 }
4315
4316 clear_nth_bit(res, 0);
4317 InlineKlass* vk = (InlineKlass*)res;
4318 assert(verif_vk == vk, "broken calling convention");
4319 assert(Metaspace::contains((void*)res), "should be klass");
4320
4321 // Allocate handles for every oop field so they are safe in case of
4322 // a safepoint when allocating
4323 GrowableArray<Handle> handles;
4324 vk->save_oop_fields(reg_map, handles);
4325
4326 // It's unsafe to safepoint until we are here
4327 JRT_BLOCK;
4328 {
4329 JavaThread* THREAD = current;
4330 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4331 current->set_vm_result_oop(vt);
4332 }
4333 JRT_BLOCK_END;
4334 }
4335 JRT_END