/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/access.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/klass.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/macros.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif

// Shared runtime stub routines reside in their own unique blob with a
// single entry point


#define SHARED_STUB_FIELD_DEFINE(name, type) \
  type* SharedRuntime::BLOB_FIELD_NAME(name);
SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
#undef SHARED_STUB_FIELD_DEFINE

nmethod* SharedRuntime::_cont_doYield_stub;

#if 0
// TODO tweak global stub name generation to match this
#define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
const char *SharedRuntime::_stub_names[] = {
  SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
};
#endif

//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_initial_stubs() {
  // Build this early so it's available for the interpreter.
  _throw_StackOverflowError_blob =
    generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
}

void SharedRuntime::generate_stubs() {
  _wrong_method_blob =
    generate_resolve_blob(StubId::shared_wrong_method_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
  _wrong_method_abstract_blob =
    generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
  _ic_miss_blob =
    generate_resolve_blob(StubId::shared_ic_miss_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
  _resolve_opt_virtual_call_blob =
    generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
  _resolve_virtual_call_blob =
    generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
  _resolve_static_call_blob =
    generate_resolve_blob(StubId::shared_resolve_static_call_id,
                          CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));

  _throw_delayed_StackOverflowError_blob =
    generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));

  _throw_AbstractMethodError_blob =
    generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));

  _throw_IncompatibleClassChangeError_blob =
    generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));

  _throw_NullPointerException_at_call_blob =
    generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

#if COMPILER2_OR_JVMCI
  // Vectors are generated only by C2 and JVMCI.
  bool support_wide = is_wide_vector(MaxVectorSize);
  if (support_wide) {
    _polling_page_vectors_safepoint_handler_blob =
      generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
                            CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
  }
#endif // COMPILER2_OR_JVMCI
  _polling_page_safepoint_handler_blob =
    generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
                          CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
  _polling_page_return_handler_blob =
    generate_handler_blob(StubId::shared_polling_page_return_handler_id,
                          CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));

  generate_deopt_blob();
}

void SharedRuntime::init_adapter_library() {
  AdapterHandlerLibrary::initialize();
}

#if INCLUDE_JFR
//------------------------------generate jfr runtime stubs ------
void SharedRuntime::generate_jfr_stubs() {
  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_jfr_stubs";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
  _jfr_return_lease_blob = generate_jfr_return_lease();
}

#endif // INCLUDE_JFR

#include <math.h>

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
uint SharedRuntime::_ic_miss_ctr = 0;
uint SharedRuntime::_wrong_method_ctr = 0;
uint SharedRuntime::_resolve_static_ctr = 0;
uint SharedRuntime::_resolve_virtual_ctr = 0;
uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
uint SharedRuntime::_implicit_null_throws = 0;
uint SharedRuntime::_implicit_div0_throws = 0;

int64_t SharedRuntime::_nof_normal_calls = 0;
int64_t SharedRuntime::_nof_inlined_calls = 0;
int64_t SharedRuntime::_nof_megamorphic_calls = 0;
int64_t SharedRuntime::_nof_static_calls = 0;
int64_t SharedRuntime::_nof_inlined_static_calls = 0;
int64_t SharedRuntime::_nof_interface_calls = 0;
int64_t SharedRuntime::_nof_inlined_interface_calls = 0;

uint SharedRuntime::_new_instance_ctr = 0;
uint SharedRuntime::_new_array_ctr = 0;
uint SharedRuntime::_multi2_ctr = 0;
uint SharedRuntime::_multi3_ctr = 0;
uint SharedRuntime::_multi4_ctr = 0;
uint SharedRuntime::_multi5_ctr = 0;
uint SharedRuntime::_mon_enter_stub_ctr = 0;
uint SharedRuntime::_mon_exit_stub_ctr = 0;
uint SharedRuntime::_mon_enter_ctr = 0;
uint SharedRuntime::_mon_exit_ctr = 0;
uint SharedRuntime::_partial_subtype_ctr = 0;
uint SharedRuntime::_jbyte_array_copy_ctr = 0;
uint SharedRuntime::_jshort_array_copy_ctr = 0;
uint SharedRuntime::_jint_array_copy_ctr = 0;
uint SharedRuntime::_jlong_array_copy_ctr = 0;
uint SharedRuntime::_oop_array_copy_ctr = 0;
uint SharedRuntime::_checkcast_array_copy_ctr = 0;
uint SharedRuntime::_unsafe_array_copy_ctr = 0;
uint SharedRuntime::_generic_array_copy_ctr = 0;
uint SharedRuntime::_slow_array_copy_ctr = 0;
uint SharedRuntime::_find_handler_ctr = 0;
uint SharedRuntime::_rethrow_ctr = 0;
uint SharedRuntime::_unsafe_set_memory_ctr = 0;

int SharedRuntime::_ICmiss_index = 0;
int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

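// Record an inline-cache miss at the given call-site address in a small,
// bounded table: a known site gets its count bumped, a new site takes the
// next free slot, and once the table is full the last slot is reused.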
void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}

#ifdef COMPILER2
// Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
void SharedRuntime::debug_print_value(jboolean x) {
  tty->print_cr("boolean %d", x);
}

void SharedRuntime::debug_print_value(jbyte x) {
  tty->print_cr("byte %d", x);
}

void SharedRuntime::debug_print_value(jshort x) {
  tty->print_cr("short %d", x);
}

void SharedRuntime::debug_print_value(jchar x) {
  tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
}

void SharedRuntime::debug_print_value(jint x) {
  tty->print_cr("int %d", x);
}

void SharedRuntime::debug_print_value(jlong x) {
  tty->print_cr("long " JLONG_FORMAT, x);
}

void SharedRuntime::debug_print_value(jfloat x) {
  tty->print_cr("float %f", x);
}

void SharedRuntime::debug_print_value(jdouble x) {
  tty->print_cr("double %lf", x);
}

void SharedRuntime::debug_print_value(oopDesc* x) {
  x->print();
}
#endif // COMPILER2

#endif // PRODUCT


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END


JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END
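
// Why the min_jlong guards above are needed: Java defines
// Long.MIN_VALUE / -1 to be Long.MIN_VALUE and Long.MIN_VALUE % -1 to be 0,
// while the equivalent C++ expressions overflow (undefined behavior).
// A minimal, compiled-out sketch of the same guard (hypothetical helper
// names, for illustration only):
#if 0
#include <cstdint>
#include <cassert>
static int64_t java_ldiv_demo(int64_t y, int64_t x) {
  // Same operand order as SharedRuntime::ldiv above: computes x / y.
  if (x == INT64_MIN && y == -1) return x;  // the lone overflowing case
  return x / y;
}
static void ldiv_demo() {
  assert(java_ldiv_demo(-1, INT64_MIN) == INT64_MIN);  // UB without the guard
  assert(java_ldiv_demo(3, -7) == -2);                 // truncates toward zero
}
#endif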


#ifdef _WIN64
const juint float_sign_mask = 0x7FFFFFFF;
const juint float_infinity = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity = CONST64(0x7FF0000000000000);
#endif

#if !defined(X86)
JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  juint xbits = PrimitiveConversions::cast<juint>(x);
  juint ybits = PrimitiveConversions::cast<juint>(y);
  // x Mod Infinity == x unless x is infinity
  if (((xbits & float_sign_mask) != float_infinity) &&
      ((ybits & float_sign_mask) == float_infinity) ) {
    return x;
  }
  return ((jfloat)fmod_winx64((double)x, (double)y));
#else
  return ((jfloat)fmod((double)x, (double)y));
#endif
JRT_END

JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  julong xbits = PrimitiveConversions::cast<julong>(x);
  julong ybits = PrimitiveConversions::cast<julong>(y);
  // x Mod Infinity == x unless x is infinity
  if (((xbits & double_sign_mask) != double_infinity) &&
      ((ybits & double_sign_mask) == double_infinity) ) {
    return x;
  }
  return ((jdouble)fmod_winx64((double)x, (double)y));
#else
  return ((jdouble)fmod((double)x, (double)y));
#endif
JRT_END
#endif // !X86

JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
  return (jfloat)x;
JRT_END

#ifdef __SOFTFP__
JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
  return x + y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
  return x - y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
  return x * y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
  return x + y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
  return x - y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
  return x * y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
  return (jdouble)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
  return (jdouble)x;
JRT_END

JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
  return x > y ? 1 : (x == y ? 0 : -1); /* x < y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
  return x < y ? -1 : (x == y ? 0 : 1); /* x > y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
  return x > y ? 1 : (x == y ? 0 : -1); /* x < y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
  return x < y ? -1 : (x == y ? 0 : 1); /* x > y or is_nan */
JRT_END

// Functions that return the opposite of what the __aeabi_* comparison
// helpers return when an operand is NaN.
JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END
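
// Example of the unordered case: unordered_fcmplt(NaN, 1.0f) yields 1,
// whereas an __aeabi_fcmplt-style helper yields 0 when an operand is NaN.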

// Intrinsics make gcc generate code for these.
float SharedRuntime::fneg(float f) {
  return -f;
}

double SharedRuntime::dneg(double f) {
  return -f;
}

#endif // __SOFTFP__

#if defined(__SOFTFP__) || defined(E500V2)
// Intrinsics make gcc generate code for these.
double SharedRuntime::dabs(double f) {
  return (f <= (double)0.0) ? (double)0.0 - f : f;
}

#endif

#if defined(__SOFTFP__)
double SharedRuntime::dsqrt(double f) {
  return sqrt(f);
}
#endif

JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jint)
    return max_jint;
  if (x <= (jfloat) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jlong)
    return max_jlong;
  if (x <= (jfloat) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jint)
    return max_jint;
  if (x <= (jdouble) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jlong)
    return max_jlong;
  if (x <= (jdouble) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END
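
// The four conversions above implement Java's float-to-integral narrowing
// semantics (JLS 5.1.3): NaN converts to 0 and out-of-range values saturate
// at the target type's min/max, unlike a raw C++ cast. A compiled-out
// standalone sketch (hypothetical helper names, for illustration only):
#if 0
#include <cstdint>
#include <cmath>
#include <cassert>
static int32_t java_f2i_demo(float x) {
  if (std::isnan(x))          return 0;          // NaN -> 0
  if (x >= (float) INT32_MAX) return INT32_MAX;  // saturate high
  if (x <= (float) INT32_MIN) return INT32_MIN;  // saturate low
  return (int32_t) x;
}
static void f2i_demo() {
  assert(java_f2i_demo(1e20f)    == INT32_MAX);
  assert(java_f2i_demo(-1e20f)   == INT32_MIN);
  assert(java_f2i_demo(nanf("")) == 0);
}
#endif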


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END


// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame, selected based on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
  // Note: This is called when we have unwound the frame of the callee that did
  // throw an exception. So far, no check has been performed by the StackWatermarkSet.
  // Notably, the stack is not walkable at this point, and hence the check must
  // be deferred until later. Specifically, any of the handlers returned here
  // will get dispatched to, and they perform the deferred
  // StackWatermarkSet::after_unwind check at a point where the stack is walkable.
  assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
  assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");

#if INCLUDE_JVMCI
  // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
  // and other exception handler continuations do not read it
  current->set_exception_pc(nullptr);
#endif // INCLUDE_JVMCI

  if (Continuation::is_return_barrier_entry(return_address)) {
    return StubRoutines::cont_returnBarrierExc();
  }

  // The fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
  if (nm != nullptr) {
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      // If we come here because of a stack overflow, the stack may be
      // unguarded. Reguard the stack otherwise if we return to the
      // deopt blob and the stack bang causes a stack overflow we
      // crash.
      StackOverflow* overflow_state = current->stack_overflow_state();
      bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
      if (overflow_state->reserved_stack_activation() != current->stack_base()) {
        overflow_state->set_reserved_stack_activation(current->stack_base());
      }
      assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // * OptoRuntime::handle_exception_C_helper for C2 code
      // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
#ifdef COMPILER2
      if (nm->compiler_type() == compiler_c2) {
        return OptoRuntime::exception_blob()->entry_point();
      }
#endif // COMPILER2
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // JavaCallWrapper::~JavaCallWrapper
    assert(StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
    return StubRoutines::catch_exception_entry();
  }
  if (blob != nullptr && blob->is_upcall_stub()) {
    return StubRoutines::upcall_stub_exception_handler();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // InterpreterRuntime::exception_handler_for_exception
    return Interpreter::rethrow_exception_entry();
  }

  guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
    os::print_location(tty, (intptr_t)return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT
  ShouldNotReachHere();
  return nullptr;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
  return raw_exception_handler_for_return_address(current, return_address);
JRT_END


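// Map the pc of a faulting safepoint poll to the entry point of the stub
// that handles it: return polls, polls in code using wide vectors, and
// ordinary loop polls each have their own handler blob (selected below).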
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
         "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));

#ifdef ASSERT
  if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
    tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
    Disassembler::decode(cb);
    fatal("Only polling locations are used for safepoint");
  }
#endif

  bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
  bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else if (has_wide_vectors) {
    assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
           "polling page vectors safepoint stub not created yet");
    stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
  log_trace(safepoint)("Polling page exception: thread = " INTPTR_FORMAT " [%d], pc = "
                       INTPTR_FORMAT " (%s), stub = " INTPTR_FORMAT,
                       p2i(Thread::current()),
                       Thread::current()->osthread()->thread_id(),
                       p2i(pc),
                       at_poll_return ? "return" : "loop",
                       p2i(stub));
  return stub;
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(current, true);
    methodHandle method = methodHandle(current, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    vframeStream vfst(current, true);
    methodHandle method = methodHandle(current, vfst.method());
    int bci = vfst.bci();
    MethodData* trap_mdo = method->method_data();
    if (trap_mdo != nullptr) {
      // Set exception_seen if the exceptional bytecode is an invoke
      Bytecode_invoke call = Bytecode_invoke_check(method, bci);
      if (call.is_valid()) {
        ResourceMark rm(current);

        // Lock to read ProfileData, and ensure lock is not broken by a safepoint
        MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);

        ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
        if (pdata != nullptr && pdata->is_BitData()) {
          BitData* bit_data = (BitData*) pdata;
          bit_data->set_exception_seen();
        }
      }
    }
  }
#endif

  Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
  Handle h_exception = Exceptions::new_exception(current, name, message);
  throw_and_post_jvmti_exception(current, h_exception);
}

// The interpreter code that calls this tracing function is only
// called/generated when UL is enabled for the redefine, class tags at the
// right level. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, Method* method))
  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the Method* from the constant pool.
    ResourceMark rm;
    log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
  }
  return 0;
JRT_END

// ret_pc points into the caller; we are returning the caller's exception handler
// for the given exception.
// Note that this implementation assumes it is only called when an exception has actually occurred.
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
  assert(nm != nullptr, "must exist");
  ResourceMark rm;

#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    // lookup exception handler for this pc
    int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
    ExceptionHandlerTable table(nm);
    HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
    if (t != nullptr) {
      return nm->code_begin() + t->pco();
    } else {
      bool make_not_entrant = true;
      return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
    }
  }
#endif // INCLUDE_JVMCI

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  Handle orig_exception(THREAD, exception());

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    bool recursive_exception = false;
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      Klass* ek = exception->klass();
      methodHandle mh(THREAD, sd->method());
      handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        recursive_exception = true;
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        recursive_exception_occurred = true;
        exception.replace(PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      else {
        recursive_exception = false;
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != nullptr) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
  }

  // found handling method => lookup exception handler
  int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);

  // If the compiler did not anticipate a recursive exception, resulting in an exception
  // thrown from the catch bci, then the compiled exception handler might be missing.
  // This is rare. Just deoptimize and let the interpreter rethrow the original
  // exception at the original bci.
  if (t == nullptr && recursive_exception_occurred) {
    exception.replace(orig_exception()); // restore original exception
    bool make_not_entrant = false;
    return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
  }

  if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (t == nullptr && nm->is_compiled_by_c1()) {
    assert(nm->unwind_handler_begin() != nullptr, "");
    return nm->unwind_handler_begin();
  }
#endif

  if (t == nullptr) {
    ttyLocker ttyl;
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
    tty->print_cr(" Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return nullptr;
  }

  if (handler_bci != -1) { // did we find a handler in this method?
    sd->method()->set_exception_handler_entered(handler_bci); // profile
  }
  return nm->code_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
  throw_StackOverflowError_common(current, false);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
  throw_StackOverflowError_common(current, true);
JRT_END

void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  JavaThread* THREAD = current; // For exception macros.
  InstanceKlass* k = vmClasses::StackOverflowError_klass();
  oop exception_oop = k->allocate_instance(CHECK);
  if (delayed) {
    java_lang_Throwable::set_message(exception_oop,
                                     Universe::delayed_stack_overflow_error_message());
  }
  Handle exception (current, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  // Remove the ScopedValue bindings in case we got a
  // StackOverflowError while we were trying to remove ScopedValue
  // bindings.
  current->clear_scopedValueBindings();
  // Increment counter for hs_err file reporting
  Exceptions::increment_stack_overflow_errors();
  throw_and_post_jvmti_exception(current, exception);
}

address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
                                                           address pc,
                                                           ImplicitExceptionKind exception_kind)
{
  address target_pc = nullptr;

  if (Interpreter::contains(pc)) {
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // Stack overflow should never occur during deoptimization:
        // the compiled method bangs the stack by as much as the
        // interpreter would need in case of a deoptimization. The
        // deoptimization blob and uncommon trap blob bang the stack
        // in a debug VM to verify the correctness of the compiled
        // method stack banging.
        assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
        Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
        return SharedRuntime::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);

          // If vt_stub is null, then return null to signal handler to report the SEGV error.
          if (vt_stub == nullptr) return nullptr;

          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
            // Instead of throwing the abstract method error here directly, we re-resolve
            // and will throw the AbstractMethodError during resolve. As a result, we'll
            // get a more detailed error message.
            return SharedRuntime::get_handle_wrong_method_stub();
          } else {
            Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
            // Assert that the signal comes from the expected location in stub code.
            assert(vt_stub->is_null_pointer_exception(pc),
                   "obtained signal from unexpected location in stub code");
            return SharedRuntime::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);

          // If code blob is null, then return null to signal handler to report the SEGV error.
          if (cb == nullptr) return nullptr;

          // Exception happened in CodeCache. Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
            if (!is_in_blob) {
              // Allow normal crash reporting to handle this
              return nullptr;
            }
            Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
            // There is no handler here, so we will simply unwind.
            return SharedRuntime::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's a compiled method. Consult its exception handlers.
          nmethod* nm = cb->as_nmethod();
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
            return SharedRuntime::throw_NullPointerException_at_call_entry();
          }

          if (nm->method()->is_method_handle_intrinsic()) {
            // exception happened inside MH dispatch code, similar to a vtable stub
            Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
            return SharedRuntime::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_null_exception(pc);
          // If there's an unexpected fault, target_pc might be null,
          // in which case we want to fall through into the normal
          // error handling code.
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_div0_exception(pc);
        // If there's an unexpected fault, target_pc might be null,
        // in which case we want to fall through into the normal
        // error handling code.
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    if (exception_kind == IMPLICIT_NULL) {
#ifndef PRODUCT
      // for AbortVMOnException flag
      Exceptions::debug_check_abort("java.lang.NullPointerException");
#endif //PRODUCT
      Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
    } else {
#ifndef PRODUCT
      // for AbortVMOnException flag
      Exceptions::debug_check_abort("java.lang.ArithmeticException");
#endif //PRODUCT
      Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return nullptr;
}


/**
 * Throws a java/lang/UnsatisfiedLinkError. The address of this method is
 * installed in the native function entry of all native Java methods before
 * they get linked to their actual native methods.
 *
 * \note
 * This method actually never gets called! This is because the
 * interpreter's native entries call NativeLookup::lookup() which
 * throws the exception when the lookup fails. The exception is then
 * caught and forwarded on the return from the NativeLookup::lookup() call
 * before the call to the native function. This might change in the future.
 */
JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  // We return a bad value here to make sure that the exception is
  // forwarded before we look at the return value.
  THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
}
JNI_END

address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}

JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
#if INCLUDE_JVMCI
  if (!obj->klass()->has_finalizer()) {
    return;
  }
#endif // INCLUDE_JVMCI
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

jlong SharedRuntime::get_java_tid(JavaThread* thread) {
  assert(thread != nullptr, "No thread");
  if (thread == nullptr) {
    return 0;
  }
  guarantee(Thread::current() != thread || thread->is_oop_safe(),
            "current cannot touch oops after its GC barrier is detached.");
  oop obj = thread->threadObj();
  return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741. Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc(JavaThread::current(), o, o->size());
}

int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
  return dtrace_object_alloc(thread, o, o->size());
}

int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->klass();
  Symbol* name = klass->name();
  HOTSPOT_OBJECT_ALLOC(
      get_java_tid(thread),
      (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* current, Method* method))
  assert(current == JavaThread::current(), "pre-condition");

  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_ENTRY(
      get_java_tid(current),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* current, Method* method))
  assert(current == JavaThread::current(), "pre-condition");
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_RETURN(
      get_java_tid(current),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Used by: resolve virtual/static,
// vtable updates, etc. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(current, true); // Do not skip any javaCalls

  return find_callee_info_helper(vfst, bc, callinfo, THREAD);
}

Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
  nmethod* caller = vfst.nm();

  address pc = vfst.frame_pc();
  { // Get call instruction under lock because another thread may be busy patching it.
    CompiledICLocker ic_locker(caller);
    return caller->attached_method_before_pc(pc);
  }
  return nullptr;
}

// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle; // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int bci = vfst.bci();

  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  // Substitutability test implementation piggybacks on static call resolution
  Bytecodes::Code code = caller->java_code_at(bci);
  if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
    bc = Bytecodes::_invokestatic;
    methodHandle attached_method(THREAD, extract_attached_method(vfst));
    assert(attached_method.not_null(), "must have attached method");
    vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
#ifdef ASSERT
    Symbol* subst_method_name = vmSymbols::isSubstitutable_name();
    Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
    assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
#endif
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
    // it attaches statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    } else {
      assert(attached_method->has_scalarized_args(), "invalid use of attached method");
      if (!attached_method->method_holder()->is_inline_klass()) {
        // Ignore the attached method in this case to not confuse below code
        attached_method = methodHandle(current, nullptr);
      }
    }
  }

  assert(bc != Bytecodes::_illegal, "not initialized");

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;
  bool check_null_and_abstract = true;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    Method* callee = attached_method();
    if (callee == nullptr) {
      callee = bytecode.static_target(CHECK_NH);
      if (callee == nullptr) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }
    bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
    if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
      // If the receiver is an inline type that is passed as fields, no oop is available.
      // Resolve the call without receiver null checking.
      assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
      assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
      if (bc == Bytecodes::_invokeinterface) {
        bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
      }
      check_null_and_abstract = false;
    } else {
      // Retrieve from a compiled argument list
      receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
      assert(oopDesc::is_oop_or_null(receiver()), "");
      if (receiver.is_null()) {
        THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
      }
    }
  }

  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(current, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver && check_null_and_abstract) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = nullptr;
    if (attached_method.not_null()) {
      // In case there's resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  // We first need to check whether any Java activations (compiled, interpreted)
  // exist on the stack since the last JavaCall. If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(current, true); // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame. We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame fr = current->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
1404 assert(fr.is_entry_frame(), "must be");
1405 // fr is now pointing to the entry frame.
1406 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1407 } else {
1408 Bytecodes::Code bc;
1409 CallInfo callinfo;
1410 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1411 // Calls via mismatching methods are always non-scalarized
1412 if (callinfo.resolved_method()->mismatch()) {
1413 caller_does_not_scalarize = true;
1414 }
1415 callee_method = methodHandle(current, callinfo.selected_method());
1416 }
1417 assert(callee_method()->is_method(), "must be");
1418 return callee_method;
1419 }
1420
1421 // Resolves a call.
1422 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1423 JavaThread* current = THREAD;
1424 ResourceMark rm(current);
1425 RegisterMap cbl_map(current,
1426 RegisterMap::UpdateMap::skip,
1427 RegisterMap::ProcessFrames::include,
1428 RegisterMap::WalkContinuation::skip);
1429 frame caller_frame = current->last_frame().sender(&cbl_map);
1430
1431 CodeBlob* caller_cb = caller_frame.cb();
1432 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1433 nmethod* caller_nm = caller_cb->as_nmethod();
1434
1435 // determine call info & receiver
1436 // note: a) receiver is null for static calls
1437 // b) an exception is thrown if receiver is null for non-static calls
1438 CallInfo call_info;
1439 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1440 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1441
1442 NoSafepointVerifier nsv;
1443
1444 methodHandle callee_method(current, call_info.selected_method());
1445 // Calls via mismatching methods are always non-scalarized
1446 bool mismatch = is_optimized ? call_info.selected_method()->mismatch() : call_info.resolved_method()->mismatch();
1447 if (caller_nm->is_compiled_by_c1() || mismatch) {
1448 caller_does_not_scalarize = true;
1449 }
1450
1451 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1452 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1453 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1454 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1455 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1456
1457 assert(!caller_nm->is_unloading(), "It should not be unloading");
1458
1459 #ifndef PRODUCT
1460 // tracing/debugging/statistics
1461 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1462 (is_virtual) ? (&_resolve_virtual_ctr) :
1463 (&_resolve_static_ctr);
1464 AtomicAccess::inc(addr);
1465
1466 if (TraceCallFixup) {
1467 ResourceMark rm(current);
1468 tty->print("resolving %s%s (%s) %s call to",
1469 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1470 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1471 callee_method->print_short_name(tty);
1472 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1473 p2i(caller_frame.pc()), p2i(callee_method->code()));
1474 }
1475 #endif
1476
1477 if (invoke_code == Bytecodes::_invokestatic) {
1478 assert(callee_method->method_holder()->is_initialized() ||
1479 callee_method->method_holder()->is_reentrant_initialization(current),
1480 "invalid class initialization state for invoke_static");
1481 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1482 // In order to keep class initialization check, do not patch call
1483 // site for static call when the class is not fully initialized.
1484 // Proper check is enforced by call site re-resolution on every invocation.
1485 //
1486 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1487 // explicit class initialization check is put in nmethod entry (VEP).
1488 assert(callee_method->method_holder()->is_linked(), "must be");
1489 return callee_method;
1490 }
1491 }
1492
1493
1494 // JSR 292 key invariant:
1495 // If the resolved method is a MethodHandle invoke target, the call
1496 // site must be a MethodHandle call site, because the lambda form might tail-call
1497 // leaving the stack in a state unknown to either caller or callee
1498
1499 // Compute entry points. The computation of the entry points is independent of
1500 // patching the call.
1501
1502 // Make sure the callee nmethod does not get deoptimized and removed before
1503 // we are done patching the code.
1504
1505
1506 CompiledICLocker ml(caller_nm);
1507 if (is_virtual && !is_optimized) {
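// Regular virtual call: bind the inline cache for this receiver klass.
// Later misses come through handle_ic_miss_helper().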
1508 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1509 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1510 } else {
1511 // Callsite is a direct call - set it to the destination method
1512 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1513 callsite->set(callee_method, caller_does_not_scalarize);
1514 }
1515
1516 return callee_method;
1517 }
1518
1519 // Inline caches exist only in compiled code
1520 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1521 #ifdef ASSERT
1522 RegisterMap reg_map(current,
1523 RegisterMap::UpdateMap::skip,
1524 RegisterMap::ProcessFrames::include,
1525 RegisterMap::WalkContinuation::skip);
1526 frame stub_frame = current->last_frame();
1527 assert(stub_frame.is_runtime_frame(), "sanity check");
1528 frame caller_frame = stub_frame.sender(&reg_map);
1529 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1530 #endif /* ASSERT */
1531
1532 methodHandle callee_method;
1533 bool caller_does_not_scalarize = false;
1534 JRT_BLOCK
1535 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1536 // Return Method* through TLS
1537 current->set_vm_result_metadata(callee_method());
1538 JRT_BLOCK_END
1539 // return compiled code entry point after potential safepoints
1540 return get_resolved_entry(current, callee_method, /* is_static_call= */ false, /* is_optimized= */ false, caller_does_not_scalarize);
1541 JRT_END
1542
1543
1544 // Handle call site that has been made non-entrant
1545 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1546 // 6243940 We might end up in here if the callee is deoptimized
1547 // as we race to call it. We don't want to take a safepoint if
1548 // the caller was interpreted because the caller frame will look
1549 // interpreted to the stack walkers and arguments are now
1550 // "compiled" so it is much better to make this transition
1551 // invisible to the stack walking code. The i2c path will
1552 // place the callee method in the callee_target. It is stashed
1553 // there because if we tried to find the callee by normal means a
1554 // safepoint would be possible and we would have trouble gc'ing the compiled args.
1555 RegisterMap reg_map(current,
1556 RegisterMap::UpdateMap::skip,
1557 RegisterMap::ProcessFrames::include,
1558 RegisterMap::WalkContinuation::skip);
1559 frame stub_frame = current->last_frame();
1560 assert(stub_frame.is_runtime_frame(), "sanity check");
1561 frame caller_frame = stub_frame.sender(&reg_map);
1562
1563 if (caller_frame.is_interpreted_frame() ||
1564 caller_frame.is_entry_frame() ||
1565 caller_frame.is_upcall_stub_frame()) {
1566 Method* callee = current->callee_target();
1567 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1568 current->set_vm_result_metadata(callee);
1569 current->set_callee_target(nullptr);
1570 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1571 // Bypass class initialization checks in c2i when caller is in native.
1572 // JNI calls to static methods don't have class initialization checks.
1573 // Fast class initialization checks are present in c2i adapters and call into
1574 // SharedRuntime::handle_wrong_method() on the slow path.
1575 //
1576 // JVM upcalls may land here as well, but there's a proper check present in
1577 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1578 // so bypassing it in c2i adapter is benign.
1579 return callee->get_c2i_no_clinit_check_entry();
1580 } else {
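// An interpreted caller lost the int -> i2c -> c2i race described above;
// its inline type arguments were not scalarized, so use the c2i entry
// for non-scalarizing callers (cf. get_resolved_entry() below).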
1581 if (caller_frame.is_interpreted_frame()) {
1582 return callee->get_c2i_inline_entry();
1583 } else {
1584 return callee->get_c2i_entry();
1585 }
1586 }
1587 }
1588
1589 // Must be compiled to compiled path which is safe to stackwalk
1590 methodHandle callee_method;
1592 bool is_optimized = false;
1593 bool caller_does_not_scalarize = false;
1594 JRT_BLOCK
1595 // Force re-resolution of the call site (if we were called from a compiled frame)
1596 callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1597 current->set_vm_result_metadata(callee_method());
1598 JRT_BLOCK_END
1599 // return compiled code entry point after potential safepoints
1600 return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1601 JRT_END
1602
1603 // Handle abstract method call
1604 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1605 // Verbose error message for AbstractMethodError.
1606 // Get the called method from the invoke bytecode.
1607 vframeStream vfst(current, true);
1608 assert(!vfst.at_end(), "Java frame must exist");
1609 methodHandle caller(current, vfst.method());
1610 Bytecode_invoke invoke(caller, vfst.bci());
1611 DEBUG_ONLY( invoke.verify(); )
1612
1613 // Find the compiled caller frame.
1614 RegisterMap reg_map(current,
1615 RegisterMap::UpdateMap::include,
1616 RegisterMap::ProcessFrames::include,
1617 RegisterMap::WalkContinuation::skip);
1618 frame stubFrame = current->last_frame();
1619 assert(stubFrame.is_runtime_frame(), "must be");
1620 frame callerFrame = stubFrame.sender(&reg_map);
1621 assert(callerFrame.is_compiled_frame(), "must be");
1622
1623 // Install exception and return forward entry.
1624 address res = SharedRuntime::throw_AbstractMethodError_entry();
1625 JRT_BLOCK
1626 methodHandle callee(current, invoke.static_target(current));
1627 if (!callee.is_null()) {
1628 oop recv = callerFrame.retrieve_receiver(&reg_map);
1629 Klass* recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1630 res = StubRoutines::forward_exception_entry();
1631 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1632 }
1633 JRT_BLOCK_END
1634 return res;
1635 JRT_END
1636
1637 // return verified_code_entry if interp_only_mode is not set for the current thread;
1638 // otherwise return c2i entry.
1639 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1640 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
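// StressCallingConvention randomly (roughly 1 in 1024 calls) pretends the
// thread is in interp_only_mode to exercise the c2i paths below.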
1641 bool is_interp_only_mode = (StressCallingConvention && (os::random() % (1 << 10)) == 0) || current->is_interp_only_mode();
1642 // In interp_only_mode we need to go to the interpreted entry
1643 // The c2i won't patch in this mode -- see fixup_callers_callsite
1644 bool go_to_interpreter = is_interp_only_mode && !callee_method->is_special_native_intrinsic();
1645
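// Three compiled entries exist with InlineTypePassFieldsAsArgs (see
// CompiledEntrySignature::c1_inline_ro_entry_type() below): VIEP (no args
// scalarized, for callers that do not scalarize), VEP (fully scalarized,
// for static and optimized virtual calls), and VIEP(RO) (receiver passed
// as an oop, for regular virtual calls).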
1646 if (caller_does_not_scalarize) {
1647 if (go_to_interpreter) {
1648 return callee_method->get_c2i_inline_entry();
1649 }
1650 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1651 return callee_method->verified_inline_code_entry();
1652 } else if (is_static_call || is_optimized) {
1653 if (go_to_interpreter) {
1654 return callee_method->get_c2i_entry();
1655 }
1656 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1657 return callee_method->verified_code_entry();
1658 } else {
1659 if (go_to_interpreter) {
1660 return callee_method->get_c2i_inline_ro_entry();
1661 }
1662 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1663 return callee_method->verified_inline_ro_code_entry();
1664 }
1665 }
1666
1667 // resolve a static call and patch code
1668 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1669 methodHandle callee_method;
1670 bool caller_does_not_scalarize = false;
1672 JRT_BLOCK
1673 callee_method = SharedRuntime::resolve_helper(/* is_virtual= */ false, /* is_optimized= */ false, caller_does_not_scalarize, CHECK_NULL);
1674 current->set_vm_result_metadata(callee_method());
1675 JRT_BLOCK_END
1676 // return compiled code entry point after potential safepoints
1677 return get_resolved_entry(current, callee_method, /* is_static_call= */ true, /* is_optimized= */ false, caller_does_not_scalarize);
1678 JRT_END
1679
1680 // resolve virtual call and update inline cache to monomorphic
1681 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1682 methodHandle callee_method;
1683 bool caller_does_not_scalarize = false;
1684 JRT_BLOCK
1685 callee_method = SharedRuntime::resolve_helper(/* is_virtual= */ true, /* is_optimized= */ false, caller_does_not_scalarize, CHECK_NULL);
1686 current->set_vm_result_metadata(callee_method());
1687 JRT_BLOCK_END
1688 // return compiled code entry point after potential safepoints
1689 return get_resolved_entry(current, callee_method, /* is_static_call= */ false, /* is_optimized= */ false, caller_does_not_scalarize);
1690 JRT_END
1691
1692
1693 // Resolve a virtual call that can be statically bound (e.g., always
1694 // monomorphic, so it has no inline cache). Patch code to resolved target.
1695 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1696 methodHandle callee_method;
1697 bool caller_does_not_scalarize = false;
1698 JRT_BLOCK
1699 callee_method = SharedRuntime::resolve_helper(/* is_virtual= */ true, /* is_optimized= */ true, caller_does_not_scalarize, CHECK_NULL);
1700 current->set_vm_result_metadata(callee_method());
1701 JRT_BLOCK_END
1702 // return compiled code entry point after potential safepoints
1703 return get_resolved_entry(current, callee_method, /* is_static_call= */ false, /* is_optimized= */ true, caller_does_not_scalarize);
1704 JRT_END
1705
1706 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1707 JavaThread* current = THREAD;
1708 ResourceMark rm(current);
1709 CallInfo call_info;
1710 Bytecodes::Code bc;
1711
1712 // receiver is null for static calls. An exception is thrown for null
1713 // receivers for non-static calls
1714 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1715
1716 methodHandle callee_method(current, call_info.selected_method());
1717
1718 #ifndef PRODUCT
1719 AtomicAccess::inc(&_ic_miss_ctr);
1720
1721 // Statistics & Tracing
1722 if (TraceCallFixup) {
1723 ResourceMark rm(current);
1724 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1725 callee_method->print_short_name(tty);
1726 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1727 }
1728
1729 if (ICMissHistogram) {
1730 MutexLocker m(VMStatistic_lock);
1731 RegisterMap reg_map(current,
1732 RegisterMap::UpdateMap::skip,
1733 RegisterMap::ProcessFrames::include,
1734 RegisterMap::WalkContinuation::skip);
1735 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1736 // produce statistics under the lock
1737 trace_ic_miss(f.pc());
1738 }
1739 #endif
1740
1741 // install an event collector so that when a vtable stub is created the
1742 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1743 // event can't be posted when the stub is created as locks are held
1744 // - instead the event will be deferred until the event collector goes
1745 // out of scope.
1746 JvmtiDynamicCodeEventCollector event_collector;
1747
1748 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1749 RegisterMap reg_map(current,
1750 RegisterMap::UpdateMap::skip,
1751 RegisterMap::ProcessFrames::include,
1752 RegisterMap::WalkContinuation::skip);
1753 frame caller_frame = current->last_frame().sender(&reg_map);
1754 CodeBlob* cb = caller_frame.cb();
1755 nmethod* caller_nm = cb->as_nmethod();
1756 // Calls via mismatching methods are always non-scalarized
1757 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1758 caller_does_not_scalarize = true;
1759 }
1760
1761 CompiledICLocker ml(caller_nm);
1762 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1763 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1764
1765 return callee_method;
1766 }
1767
1768 //
1769 // Resets a call-site in compiled code so it will get resolved again.
1770 // This routine handles virtual call sites, optimized virtual call
1771 // sites, and static call sites. Typically used to change a call site's
1772 // destination from compiled to interpreted.
1773 //
1774 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1775 JavaThread* current = THREAD;
1776 ResourceMark rm(current);
1777 RegisterMap reg_map(current,
1778 RegisterMap::UpdateMap::skip,
1779 RegisterMap::ProcessFrames::include,
1780 RegisterMap::WalkContinuation::skip);
1781 frame stub_frame = current->last_frame();
1782 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1783 frame caller = stub_frame.sender(&reg_map);
1784 if (caller.is_compiled_frame()) {
1785 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1786 }
1787 assert(!caller.is_interpreted_frame(), "must be compiled");
1788
1789 // If the frame isn't a live compiled frame (i.e. deoptimized by the time we get here), no IC clearing must be done
1790 // for the caller. However, when the caller is C2 compiled and the callee a C1 or C2 compiled method, then we still
1791 // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1792 // using the wrong method entry point and accidentally skip the buffering of the receiver.
1793 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1794 const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1795 const bool caller_is_continuation_enter_intrinsic =
1796 caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1797 const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1798
1799 const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1800 !callee_method()->is_static() &&
1801 callee_method()->is_scalarized_arg(0);
1802 const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1803
1804 if (do_IC_clearing || compute_is_optimized) {
1805 address pc = caller.pc();
1806
1807 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1808 assert(caller_nm != nullptr, "did not find caller nmethod");
1809
1810 // Default call_addr is the location of the "basic" call.
1811 // Determine the address of the call we are reresolving. With
1812 // Inline Caches we will always find a recognizable call.
1813 // With Inline Caches disabled we may or may not find a
1814 // recognizable call. We will always find a call for static
1815 // calls and for optimized virtual calls. For vanilla virtual
1816 // calls it depends on the state of the UseInlineCaches switch.
1817 //
1818 // With Inline Caches disabled we can get here for a virtual call
1819 // for two reasons:
1820 // 1 - calling an abstract method. The vtable for abstract methods
1821 // will run us thru handle_wrong_method and we will eventually
1822 // end up in the interpreter to throw the ame.
1823 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1824 // call and between the time we fetch the entry address and
1825 // we jump to it the target gets deoptimized. Similar to 1
1826 // we will wind up in the interpreter (thru a c2i with c2).
1827 //
1828 CompiledICLocker ml(caller_nm);
1829 address call_addr = caller_nm->call_instruction_address(pc);
1830
1831 if (call_addr != nullptr) {
1832 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1833 // bytes back in the instruction stream so we must also check for reloc info.
1834 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1835 bool ret = iter.next(); // Get item
1836 if (ret) {
1837 is_optimized = false;
1838 switch (iter.type()) {
1839 case relocInfo::static_call_type:
1840 assert(callee_method->is_static(), "must be");
1841 case relocInfo::opt_virtual_call_type: {
1842 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1843 if (do_IC_clearing) {
1844 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1845 cdc->set_to_clean();
1846 }
1847 break;
1848 }
1849
1850 case relocInfo::virtual_call_type: {
1851 if (do_IC_clearing) {
1852 // compiled, dispatched call (which used to call an interpreted method)
1853 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1854 inline_cache->set_to_clean();
1855 }
1856 break;
1857 }
1858 default:
1859 break;
1860 }
1861 }
1862 }
1863 }
1864
1865 #ifndef PRODUCT
1866 AtomicAccess::inc(&_wrong_method_ctr);
1867
1868 if (TraceCallFixup) {
1869 ResourceMark rm(current);
1870 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1871 callee_method->print_short_name(tty);
1872 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1873 }
1874 #endif
1875
1876 return callee_method;
1877 }
1878
1879 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1880 // The faulting unsafe accesses should be changed to throw the error
1881 // synchronously instead. Meanwhile the faulting instruction will be
1882 // skipped over (effectively turning it into a no-op) and an
1883 // asynchronous exception will be raised which the thread will
1884 // handle at a later point. If the instruction is a load it will
1885 // return garbage.
1886
1887 // Request an async exception.
1888 thread->set_pending_unsafe_access_error();
1889
1890 // Return address of next instruction to execute.
1891 return next_pc;
1892 }
1893
1894 #ifdef ASSERT
1895 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1896 const BasicType* sig_bt,
1897 const VMRegPair* regs) {
1898 ResourceMark rm;
1899 const int total_args_passed = method->size_of_parameters();
1900 const VMRegPair* regs_with_member_name = regs;
1901 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1902
1903 const int member_arg_pos = total_args_passed - 1;
1904 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1905 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1906
1907 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1908
1909 for (int i = 0; i < member_arg_pos; i++) {
1910 VMReg a = regs_with_member_name[i].first();
1911 VMReg b = regs_without_member_name[i].first();
1912 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1913 }
1914 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1915 }
1916 #endif
1917
1918 // ---------------------------------------------------------------------------
1919 // We are calling the interpreter via a c2i. Normally this would mean that
1920 // we were called by a compiled method. However we could have lost a race
1921 // where we went int -> i2c -> c2i and so the caller could in fact be
1922 // interpreted. If the caller is compiled we attempt to patch the caller
1923 // so it no longer calls into the interpreter.
1924 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1925 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1926
1927 // It's possible that deoptimization can occur at a call site which hasn't
1928 // been resolved yet, in which case this function will be called from
1929 // an nmethod that has been patched for deopt and we can ignore the
1930 // request for a fixup.
1931 // Also it is possible that we lost a race in that from_compiled_entry
1932 // is now back to the i2c; in that case we don't need to patch, and if
1933 // we did we'd leap into space because the callsite needs to use the
1934 // "to interpreter" stub in order to load up the Method*. Don't
1935 // ask me how I know this...
1936
1937 // Result from nmethod::is_unloading is not stable across safepoints.
1938 NoSafepointVerifier nsv;
1939
1940 nmethod* callee = method->code();
1941 if (callee == nullptr) {
1942 return;
1943 }
1944
1945 // write lock needed because we might patch call site by set_to_clean()
1946 // and is_unloading() can modify nmethod's state
1947 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1948
1949 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1950 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1951 return;
1952 }
1953
1954 // The check above makes sure this is an nmethod.
1955 nmethod* caller = cb->as_nmethod();
1956
1957 // Get the return PC for the passed caller PC.
1958 address return_pc = caller_pc + frame::pc_return_offset;
1959
1960 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1961 return;
1962 }
1963
1964 // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch).
1965 CompiledICLocker ic_locker(caller);
1966 ResourceMark rm;
1967
1968 // If we got here through a static call or opt_virtual call, then we know where the
1969 // call address would be; let's peek at it
1970 address callsite_addr = (address)nativeCall_before(return_pc);
1971 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1972 if (!iter.next()) {
1973 // No reloc entry found; not a static or optimized virtual call
1974 return;
1975 }
1976
1977 relocInfo::relocType type = iter.reloc()->type();
1978 if (type != relocInfo::static_call_type &&
1979 type != relocInfo::opt_virtual_call_type) {
1980 return;
1981 }
1982
1983 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1984 callsite->set_to_clean();
1985 JRT_END
1986
1987
1988 // same as JVM_Arraycopy, but called directly from compiled code
1989 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1990 oopDesc* dest, jint dest_pos,
1991 jint length,
1992 JavaThread* current)) {
1993 #ifndef PRODUCT
1994 _slow_array_copy_ctr++;
1995 #endif
1996 // Check if we have null pointers
1997 if (src == nullptr || dest == nullptr) {
1998 THROW(vmSymbols::java_lang_NullPointerException());
1999 }
2000 // Do the copy. The casts to arrayOop are necessary for the copy_array API,
2001 // even though the copy_array API also performs dynamic checks to ensure
2002 // that src and dest are truly arrays (and are conformable).
2003 // The copy_array mechanism is awkward and could be removed, but
2004 // the compilers don't call this function except as a last resort,
2005 // so it probably doesn't matter.
2006 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
2007 (arrayOopDesc*)dest, dest_pos,
2008 length, current);
2009 }
2010 JRT_END
2011
2012 // The caller of generate_class_cast_message() (or one of its callers)
2013 // must use a ResourceMark in order to correctly free the result.
2014 char* SharedRuntime::generate_class_cast_message(
2015 JavaThread* thread, Klass* caster_klass) {
2016
2017 // Get target class name from the checkcast instruction
2018 vframeStream vfst(thread, true);
2019 assert(!vfst.at_end(), "Java frame must exist");
2020 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
2021 constantPoolHandle cpool(thread, vfst.method()->constants());
2022 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
2023 Symbol* target_klass_name = nullptr;
2024 if (target_klass == nullptr) {
2025 // This klass should be resolved, but just in case, get the name in the klass slot.
2026 target_klass_name = cpool->klass_name_at(cc.index());
2027 }
2028 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
2029 }
2030
2031
2032 // The caller of generate_class_cast_message() (or one of its callers)
2033 // must use a ResourceMark in order to correctly free the result.
2034 char* SharedRuntime::generate_class_cast_message(
2035 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
2036 const char* caster_name = caster_klass->external_name();
2037
2038 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
2039 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
2040 target_klass->external_name();
2041
2042 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
2043
2044 const char* caster_klass_description = "";
2045 const char* target_klass_description = "";
2046 const char* klass_separator = "";
2047 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
2048 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
2049 } else {
2050 caster_klass_description = caster_klass->class_in_module_of_loader();
2051 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
2052 klass_separator = (target_klass != nullptr) ? "; " : "";
2053 }
2054
2055 // add 3 for the parentheses and preceding space
2056 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2057
2058 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2059 if (message == nullptr) {
2060 // Shouldn't happen, but don't cause even more problems if it does
2061 message = const_cast<char*>(caster_klass->external_name());
2062 } else {
2063 jio_snprintf(message,
2064 msglen,
2065 "class %s cannot be cast to class %s (%s%s%s)",
2066 caster_name,
2067 target_name,
2068 caster_klass_description,
2069 klass_separator,
2070 target_klass_description
2071 );
2072 }
2073 return message;
2074 }
2075
2076 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2077 assert(klass->is_inline_klass(), "Must be a concrete value class");
2078 const char* desc = "Cannot synchronize on an instance of value class ";
2079 const char* className = klass->external_name();
2080 size_t msglen = strlen(desc) + strlen(className) + 1;
2081 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2082 if (nullptr == message) {
2083 // Out of memory: can't create detailed error message
2084 message = const_cast<char*>(klass->external_name());
2085 } else {
2086 jio_snprintf(message, msglen, "%s%s", desc, className);
2087 }
2088 return message;
2089 }
2090
2091 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2092 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2093 JRT_END
2094
2095 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2096 if (!SafepointSynchronize::is_synchronizing()) {
2097 // Only try quick_enter() if we're not trying to reach a safepoint
2098 // so that the calling thread reaches the safepoint more quickly.
2099 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2100 return;
2101 }
2102 }
2103 // NO_ASYNC required because an async exception on the state transition destructor
2104 // would leave you with the lock held and it would never be released.
2105 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2106 // and the model is that an exception implies the method failed.
2107 JRT_BLOCK_NO_ASYNC
2108 Handle h_obj(THREAD, obj);
2109 ObjectSynchronizer::enter(h_obj, lock, current);
2110 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2111 JRT_BLOCK_END
2112 }
2113
2114 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2115 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2116 SharedRuntime::monitor_enter_helper(obj, lock, current);
2117 JRT_END
2118
2119 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2120 assert(JavaThread::current() == current, "invariant");
2121 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2122 ExceptionMark em(current);
2123
2124 // Check if C2_MacroAssembler::fast_unlock() unlocked an inflated
2126 // monitor before going slow path. Since there is no safepoint
2127 // polling when calling into the VM, we can be sure that the monitor
2128 // hasn't been deallocated.
2129 ObjectMonitor* m = current->unlocked_inflated_monitor();
2130 if (m != nullptr) {
2131 assert(!m->has_owner(current), "must be");
2132 current->clear_unlocked_inflated_monitor();
2133
2134 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2135 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2136 // Some other thread acquired the lock (or the monitor was
2137 // deflated). Either way we are done.
2138 return;
2139 }
2140 }
2141
2142 // The object could become unlocked through a JNI call, which we have no other checks for.
2143 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2144 if (obj->is_unlocked()) {
2145 if (CheckJNICalls) {
2146 fatal("Object has been unlocked by JNI");
2147 }
2148 return;
2149 }
2150 ObjectSynchronizer::exit(obj, lock, current);
2151 }
2152
2153 // Handles the uncommon cases of monitor unlocking in compiled code
2154 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2155 assert(current == JavaThread::current(), "pre-condition");
2156 SharedRuntime::monitor_exit_helper(obj, lock, current);
2157 JRT_END
2158
2159 #ifndef PRODUCT
2160
2161 void SharedRuntime::print_statistics() {
2162 ttyLocker ttyl;
2163 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2164
2165 SharedRuntime::print_ic_miss_histogram();
2166
2167 // Dump the JRT_ENTRY counters
2168 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2169 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2170 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2171 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2172 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2173 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2174
2175 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2176 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2177 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2178 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2179 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2180
2181 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2182 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2183 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2184 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2185 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2186 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2187 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2188 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2189 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2190 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2191 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2192 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2193 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2194 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2195 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2196 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2197 if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2198
2199 AdapterHandlerLibrary::print_statistics();
2200
2201 if (xtty != nullptr) xtty->tail("statistics");
2202 }
2203
2204 inline double percent(int64_t x, int64_t y) {
2205 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2206 }
2207
2208 class MethodArityHistogram {
2209 public:
2210 enum { MAX_ARITY = 256 };
2211 private:
2212 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2213 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2214 static uint64_t _total_compiled_calls;
2215 static uint64_t _max_compiled_calls_per_method;
2216 static int _max_arity; // max. arity seen
2217 static int _max_size; // max. arg size seen
2218
2219 static void add_method_to_histogram(nmethod* nm) {
2220 Method* method = (nm == nullptr) ? nullptr : nm->method();
2221 if (method != nullptr) {
2222 ArgumentCount args(method->signature());
2223 int arity = args.size() + (method->is_static() ? 0 : 1);
2224 int argsize = method->size_of_parameters();
2225 arity = MIN2(arity, MAX_ARITY-1);
2226 argsize = MIN2(argsize, MAX_ARITY-1);
2227 uint64_t count = (uint64_t)method->compiled_invocation_count();
2228 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2229 _total_compiled_calls += count;
2230 _arity_histogram[arity] += count;
2231 _size_histogram[argsize] += count;
2232 _max_arity = MAX2(_max_arity, arity);
2233 _max_size = MAX2(_max_size, argsize);
2234 }
2235 }
2236
2237 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2238 const int N = MIN2(9, n);
2239 double sum = 0;
2240 double weighted_sum = 0;
2241 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2242 if (sum >= 1) { // prevent divide by zero or divide overflow
2243 double rest = sum;
2244 double percent = sum / 100;
2245 for (int i = 0; i <= N; i++) {
2246 rest -= (double)histo[i];
2247 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2248 }
2249 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2250 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2251 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2252 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2253 } else {
2254 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2255 }
2256 }
2257
2258 void print_histogram() {
2259 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2260 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2261 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2262 print_histogram_helper(_max_size, _size_histogram, "size");
2263 tty->cr();
2264 }
2265
2266 public:
2267 MethodArityHistogram() {
2268 // Take the Compile_lock to protect against changes in the CodeBlob structures
2269 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2270 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2271 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2272 _max_arity = _max_size = 0;
2273 _total_compiled_calls = 0;
2274 _max_compiled_calls_per_method = 0;
2275 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2276 CodeCache::nmethods_do(add_method_to_histogram);
2277 print_histogram();
2278 }
2279 };
2280
2281 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2282 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2283 uint64_t MethodArityHistogram::_total_compiled_calls;
2284 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2285 int MethodArityHistogram::_max_arity;
2286 int MethodArityHistogram::_max_size;
2287
2288 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2289 tty->print_cr("Calls from compiled code:");
2290 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2291 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2292 int64_t mono_i = _nof_interface_calls;
2293 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2294 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2295 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2296 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2297 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2298 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2299 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2300 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2301 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2302 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2303 tty->cr();
2304 tty->print_cr("Note 1: counter updates are not MT-safe.");
2305 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2306 tty->print_cr(" %% in nested categories are relative to their category");
2307 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2308 tty->cr();
2309
2310 MethodArityHistogram h;
2311 }
2312 #endif
2313
2314 #ifndef PRODUCT
2315 static int _lookups; // number of calls to lookup
2316 static int _equals; // number of buckets checked with matching hash
2317 static int _archived_hits; // number of successful lookups in archived table
2318 static int _runtime_hits; // number of successful lookups in runtime table
2319 #endif
2320
2321 // A simple wrapper class around the calling convention information
2322 // that allows sharing of adapters for the same calling convention.
2323 class AdapterFingerPrint : public MetaspaceObj {
2324 public:
2325 class Element {
2326 private:
2327 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2328 // field if it is flattened in the calling convention, -1 otherwise.
2329 juint _payload;
2330
2331 static constexpr int offset_bit_width = 24;
2332 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2333 public:
2334 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2335 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2336 }
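// For example, Element(T_INT, -1) yields payload 0x0AFFFFFF (T_INT is 10
// in the BasicType enum): the type in the high byte and offset -1 encoded
// as the all-ones 24-bit pattern, which offset() maps back to -1.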
2337
2338 BasicType bt() const {
2339 return static_cast<BasicType>(_payload >> offset_bit_width);
2340 }
2341
2342 int offset() const {
2343 juint res = _payload & offset_bit_mask;
2344 return res == offset_bit_mask ? -1 : res;
2345 }
2346
2347 juint hash() const {
2348 return _payload;
2349 }
2350
2351 bool operator!=(const Element& other) const {
2352 return _payload != other._payload;
2353 }
2354 };
2355
2356 private:
2357 const bool _has_ro_adapter;
2358 const int _length;
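// The _length Element entries are stored immediately after this header
// and are accessed via data_offset()/data_pointer() below.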
2359
2360 static int data_offset() { return sizeof(AdapterFingerPrint); }
2361 Element* data_pointer() {
2362 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2363 }
2364
2365 const Element& element_at(int index) {
2366 assert(index < length(), "index %d out of bounds for length %d", index, length());
2367 Element* data = data_pointer();
2368 return data[index];
2369 }
2370
2371 // Private constructor. Use allocate() to get an instance.
2372 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2373 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2374 Element* data = data_pointer();
2375 BasicType prev_bt = T_ILLEGAL;
2376 int vt_count = 0;
2377 for (int index = 0; index < _length; index++) {
2378 const SigEntry& sig_entry = sig->at(index);
2379 BasicType bt = sig_entry._bt;
2380 if (bt == T_METADATA) {
2381 // Found start of inline type in signature
2382 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2383 vt_count++;
2384 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2385 // Found end of inline type in signature
2386 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2387 vt_count--;
2388 assert(vt_count >= 0, "invalid vt_count");
2389 } else if (vt_count == 0) {
2390 // Widen fields that are not part of a scalarized inline type argument
2391 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2392 bt = adapter_encoding(bt);
2393 }
2394
2395 ::new(&data[index]) Element(bt, sig_entry._offset);
2396 prev_bt = bt;
2397 }
2398 assert(vt_count == 0, "invalid vt_count");
2399 }
2400
2401 // Call deallocate instead
2402 ~AdapterFingerPrint() {
2403 ShouldNotCallThis();
2404 }
2405
2406 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2407 return (sig != nullptr) ? sig->length() : 0;
2408 }
2409
2410 static int compute_size_in_words(int len) {
2411 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2412 }
2413
2414 // Remap BasicTypes that are handled equivalently by the adapters.
2415 // These are correct for the current system but someday it might be
2416 // necessary to make this mapping platform dependent.
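// For example, (boolean, short) and (char, int) both map to (T_INT, T_INT)
// and therefore share one adapter.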
2417 static BasicType adapter_encoding(BasicType in) {
2418 switch (in) {
2419 case T_BOOLEAN:
2420 case T_BYTE:
2421 case T_SHORT:
2422 case T_CHAR:
2423 // They are all promoted to T_INT in the calling convention
2424 return T_INT;
2425
2426 case T_OBJECT:
2427 case T_ARRAY:
2428 // In other words, we assume that any register good enough for
2429 // an int or long is good enough for a managed pointer.
2430 #ifdef _LP64
2431 return T_LONG;
2432 #else
2433 return T_INT;
2434 #endif
2435
2436 case T_INT:
2437 case T_LONG:
2438 case T_FLOAT:
2439 case T_DOUBLE:
2440 case T_VOID:
2441 return in;
2442
2443 default:
2444 ShouldNotReachHere();
2445 return T_CONFLICT;
2446 }
2447 }
2448
2449 void* operator new(size_t size, size_t fp_size) throw() {
2450 assert(fp_size >= size, "sanity check");
2451 void* p = AllocateHeap(fp_size, mtCode);
2452 memset(p, 0, fp_size);
2453 return p;
2454 }
2455
2456 public:
2457 template<typename Function>
2458 void iterate_args(Function function) {
2459 for (int i = 0; i < length(); i++) {
2460 function(element_at(i));
2461 }
2462 }
2463
2464 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2465 int len = total_args_passed_in_sig(sig);
2466 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2467 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2468 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2469 return afp;
2470 }
2471
2472 static void deallocate(AdapterFingerPrint* fp) {
2473 FreeHeap(fp);
2474 }
2475
2476 bool has_ro_adapter() const {
2477 return _has_ro_adapter;
2478 }
2479
2480 int length() const {
2481 return _length;
2482 }
2483
2484 unsigned int compute_hash() {
2485 int hash = 0;
2486 for (int i = 0; i < length(); i++) {
2487 const Element& v = element_at(i);
2488 // Add an arithmetic operation (+3) to the shift/xor hash to improve distribution
2489 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2490 }
2491 return (unsigned int)hash;
2492 }
2493
2494 const char* as_string() {
2495 stringStream st;
2496 st.print("{");
2497 if (_has_ro_adapter) {
2498 st.print("has_ro_adapter");
2499 } else {
2500 st.print("no_ro_adapter");
2501 }
2502 for (int i = 0; i < length(); i++) {
2503 st.print(", ");
2504 const Element& elem = element_at(i);
2505 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2506 }
2507 st.print("}");
2508 return st.as_string();
2509 }
2510
2511 const char* as_basic_args_string() {
2512 stringStream st;
2513 bool long_prev = false;
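// A T_LONG immediately followed by T_VOID is a genuine two-slot long ("J");
// a T_LONG without the trailing T_VOID comes from adapter_encoding()
// widening T_OBJECT/T_ARRAY on 64-bit, so it is printed as "L". For
// example, the 64-bit fingerprint of (Object, int) prints as "LI".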
2514 iterate_args([&] (const Element& arg) {
2515 if (long_prev) {
2516 long_prev = false;
2517 if (arg.bt() == T_VOID) {
2518 st.print("J");
2519 } else {
2520 st.print("L");
2521 }
2522 }
2523 if (arg.bt() == T_LONG) {
2524 long_prev = true;
2525 } else if (arg.bt() != T_VOID) {
2526 st.print("%c", type2char(arg.bt()));
2527 }
2528 });
2529 if (long_prev) {
2530 st.print("L");
2531 }
2532 return st.as_string();
2533 }
2534
2535 bool equals(AdapterFingerPrint* other) {
2536 if (other->_has_ro_adapter != _has_ro_adapter) {
2537 return false;
2538 } else if (other->_length != _length) {
2539 return false;
2540 } else {
2541 for (int i = 0; i < _length; i++) {
2542 if (element_at(i) != other->element_at(i)) {
2543 return false;
2544 }
2545 }
2546 }
2547 return true;
2548 }
2549
2550 // methods required by virtue of being a MetaspaceObj
2551 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2552 int size() const { return compute_size_in_words(_length); }
2553 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2554
2555 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2556 NOT_PRODUCT(_equals++);
2557 return fp1->equals(fp2);
2558 }
2559
2560 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2561 return fp->compute_hash();
2562 }
2563 };
2564
2565 #if INCLUDE_CDS
2566 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2567 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2568 }
2569
2570 class ArchivedAdapterTable : public OffsetCompactHashtable<
2571 AdapterFingerPrint*,
2572 AdapterHandlerEntry*,
2573 adapter_fp_equals_compact_hashtable_entry> {};
2574 #endif // INCLUDE_CDS
2575
2576 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
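// 293 is the bucket count; hashing and equality are delegated to
// AdapterFingerPrint, so methods whose signatures share a fingerprint
// share one adapter.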
2577 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2578 AnyObj::C_HEAP, mtCode,
2579 AdapterFingerPrint::compute_hash,
2580 AdapterFingerPrint::equals>;
2581 static AdapterHandlerTable* _adapter_handler_table;
2582 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2583
2584 // Find an entry with the same fingerprint, if one exists
2585 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2586 NOT_PRODUCT(_lookups++);
2587 assert_lock_strong(AdapterHandlerLibrary_lock);
2588 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2589 AdapterHandlerEntry* entry = nullptr;
2590 #if INCLUDE_CDS
2591 // If we are building the archive then the archived adapter table is
2592 // not valid and we need to use the entries added to the runtime table.
2593 if (AOTCodeCache::is_using_adapter()) {
2594 // Search the archived table first. It is read-only, so it can be searched without a lock.
2595 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2596 #ifndef PRODUCT
2597 if (entry != nullptr) {
2598 _archived_hits++;
2599 }
2600 #endif
2601 }
2602 #endif // INCLUDE_CDS
2603 if (entry == nullptr) {
2604 assert_lock_strong(AdapterHandlerLibrary_lock);
2605 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2606 if (entry_p != nullptr) {
2607 entry = *entry_p;
2608 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2609 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2610 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2611 #ifndef PRODUCT
2612 _runtime_hits++;
2613 #endif
2614 }
2615 }
2616 AdapterFingerPrint::deallocate(fp);
2617 return entry;
2618 }
2619
2620 #ifndef PRODUCT
2621 static void print_table_statistics() {
2622 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2623 return sizeof(*key) + sizeof(*a);
2624 };
2625 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2626 ts.print(tty, "AdapterHandlerTable");
2627 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2628 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2629 int total_hits = _archived_hits + _runtime_hits;
2630 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2631 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2632 }
2633 #endif
2634
2635 // ---------------------------------------------------------------------------
2636 // Implementation of AdapterHandlerLibrary
2637 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2638 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2639 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2640 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2641 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2642 #if INCLUDE_CDS
2643 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2644 #endif // INCLUDE_CDS
2645 static const int AdapterHandlerLibrary_size = 48*K;
2646 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2647 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2648
2649 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2650 assert(_buffer != nullptr, "should be initialized");
2651 return _buffer;
2652 }
2653
2654 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2655 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2656 AdapterBlob* adapter_blob = entry->adapter_blob();
2657 char blob_id[256];
2658 jio_snprintf(blob_id,
2659 sizeof(blob_id),
2660 "%s(%s)",
2661 adapter_blob->name(),
2662 entry->fingerprint()->as_string());
2663 if (Forte::is_enabled()) {
2664 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2665 }
2666
2667 if (JvmtiExport::should_post_dynamic_code_generated()) {
2668 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2669 }
2670 }
2671 }
2672
2673 void AdapterHandlerLibrary::initialize() {
2674 {
2675 ResourceMark rm;
2676 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2677 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2678 }
2679
2680 #if INCLUDE_CDS
2681 // Link adapters in AOT Cache to their code in AOT Code Cache
2682 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2683 link_aot_adapters();
2684 lookup_simple_adapters();
2685 return;
2686 }
2687 #endif // INCLUDE_CDS
2688
2689 ResourceMark rm;
2690 {
2691 MutexLocker mu(AdapterHandlerLibrary_lock);
2692
2693 CompiledEntrySignature no_args;
2694 no_args.compute_calling_conventions();
2695 _no_arg_handler = create_adapter(no_args, true);
2696
2697 CompiledEntrySignature obj_args;
2698 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2699 obj_args.compute_calling_conventions();
2700 _obj_arg_handler = create_adapter(obj_args, true);
2701
2702 CompiledEntrySignature int_args;
2703 SigEntry::add_entry(int_args.sig(), T_INT);
2704 int_args.compute_calling_conventions();
2705 _int_arg_handler = create_adapter(int_args, true);
2706
2707 CompiledEntrySignature obj_int_args;
2708 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2709 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2710 obj_int_args.compute_calling_conventions();
2711 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2712
2713 CompiledEntrySignature obj_obj_args;
2714 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2715 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2716 obj_obj_args.compute_calling_conventions();
2717 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2718
2719 // we should always get an entry back but we don't have any
2720 // associated blob on Zero
2721 assert(_no_arg_handler != nullptr &&
2722 _obj_arg_handler != nullptr &&
2723 _int_arg_handler != nullptr &&
2724 _obj_int_arg_handler != nullptr &&
2725 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2726 }
2727
2728 // Outside of the lock
2729 #ifndef ZERO
2730 // no blobs to register when we are on Zero
2731 post_adapter_creation(_no_arg_handler);
2732 post_adapter_creation(_obj_arg_handler);
2733 post_adapter_creation(_int_arg_handler);
2734 post_adapter_creation(_obj_int_arg_handler);
2735 post_adapter_creation(_obj_obj_arg_handler);
2736 #endif // ZERO
2737 }
2738
2739 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2740 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2741 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2742 return AdapterHandlerEntry::allocate(id, fingerprint);
2743 }
2744
2745 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2746 int total_args_passed = method->size_of_parameters(); // All args on stack
2747 if (total_args_passed == 0) {
2748 return _no_arg_handler;
2749 } else if (total_args_passed == 1) {
2750 if (!method->is_static()) {
2751 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2752 return nullptr;
2753 }
2754 return _obj_arg_handler;
2755 }
2756 switch (method->signature()->char_at(1)) {
2757 case JVM_SIGNATURE_CLASS: {
2758 if (InlineTypePassFieldsAsArgs) {
2759 SignatureStream ss(method->signature());
2760 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2761 if (vk != nullptr) {
2762 return nullptr;
2763 }
2764 }
2765 return _obj_arg_handler;
2766 }
2767 case JVM_SIGNATURE_ARRAY:
2768 return _obj_arg_handler;
2769 case JVM_SIGNATURE_INT:
2770 case JVM_SIGNATURE_BOOLEAN:
2771 case JVM_SIGNATURE_CHAR:
2772 case JVM_SIGNATURE_BYTE:
2773 case JVM_SIGNATURE_SHORT:
2774 return _int_arg_handler;
2775 }
2776 } else if (total_args_passed == 2 &&
2777 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2778 switch (method->signature()->char_at(1)) {
2779 case JVM_SIGNATURE_CLASS: {
2780 if (InlineTypePassFieldsAsArgs) {
2781 SignatureStream ss(method->signature());
2782 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2783 if (vk != nullptr) {
2784 return nullptr;
2785 }
2786 }
2787 return _obj_obj_arg_handler;
2788 }
2789 case JVM_SIGNATURE_ARRAY:
2790 return _obj_obj_arg_handler;
2791 case JVM_SIGNATURE_INT:
2792 case JVM_SIGNATURE_BOOLEAN:
2793 case JVM_SIGNATURE_CHAR:
2794 case JVM_SIGNATURE_BYTE:
2795 case JVM_SIGNATURE_SHORT:
2796 return _obj_int_arg_handler;
2797 }
2798 }
2799 return nullptr;
2800 }
2801
2802 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2803 _method(method), _num_inline_args(0), _has_inline_recv(false),
2804 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2805 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2806 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2807 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2808 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2809 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2810 }
2811
2812 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2813 // or the same entry for VEP and VIEP(RO).
2814 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2815 if (!has_scalarized_args()) {
2816 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2817 return CodeOffsets::Verified_Entry;
2818 }
2819 if (_method->is_static()) {
2820 // Static methods don't need VIEP(RO)
2821 return CodeOffsets::Verified_Entry;
2822 }
2823
2824 if (has_inline_recv()) {
2825 if (num_inline_args() == 1) {
2826 // Share same entry for VIEP and VIEP(RO).
2827 // This is quite common: we have an instance method in an InlineKlass that has
2828 // no inline type args other than <this>.
2829 return CodeOffsets::Verified_Inline_Entry;
2830 } else {
2831 assert(num_inline_args() > 1, "must be");
2832 // No sharing:
2833 // VIEP(RO) -- <this> is passed as object
2834 // VEP -- <this> is passed as fields
2835 return CodeOffsets::Verified_Inline_Entry_RO;
2836 }
2837 }
2838
  // Non-static method and <this> is not an inline type (both cases returned above)
2840 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2841 // No sharing:
2842 // Some arguments are passed on the stack, and we have inserted reserved entries
2843 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2844 return CodeOffsets::Verified_Inline_Entry_RO;
2845 } else {
2846 // Share same entry for VEP and VIEP(RO).
2847 return CodeOffsets::Verified_Entry;
2848 }
2849 }
2850
2851 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2852 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2853 if (_supers != nullptr) {
2854 return _supers;
2855 }
2856 _supers = new GrowableArray<Method*>();
2857 // Skip private, static, and <init> methods
2858 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2859 return _supers;
2860 }
2861 Symbol* name = _method->name();
2862 Symbol* signature = _method->signature();
2863 const Klass* holder = _method->method_holder()->super();
2864 Symbol* holder_name = holder->name();
2865 ThreadInVMfromUnknown tiv;
2866 JavaThread* current = JavaThread::current();
2867 HandleMark hm(current);
2868 Handle loader(current, _method->method_holder()->class_loader());
2869
2870 // Walk up the class hierarchy and search for super methods
2871 while (holder != nullptr) {
2872 Method* super_method = holder->lookup_method(name, signature);
2873 if (super_method == nullptr) {
2874 break;
2875 }
2876 if (!super_method->is_static() && !super_method->is_private() &&
2877 (!super_method->is_package_private() ||
2878 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2879 _supers->push(super_method);
2880 }
2881 holder = super_method->method_holder()->super();
2882 }
2883 // Search interfaces for super methods
2884 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2885 for (int i = 0; i < interfaces->length(); ++i) {
2886 Method* m = interfaces->at(i)->lookup_method(name, signature);
2887 if (m != nullptr && !m->is_static() && m->is_public()) {
2888 _supers->push(m);
2889 }
2890 }
2891 return _supers;
2892 }
2893
2894 // Iterate over arguments and compute scalarized and non-scalarized signatures
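// A scalarized inline type argument is expanded in the scalarized signature as:
// a T_METADATA delimiter, a T_OBJECT buffer argument, a T_BOOLEAN null marker
// (omitted for the receiver, which is never null), the flattened field entries,
// and a closing T_VOID delimiter. See the insert_before() calls below and the
// matching decoding in initialize_from_fingerprint().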
2895 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2896 bool has_scalarized = false;
2897 if (_method != nullptr) {
2898 InstanceKlass* holder = _method->method_holder();
2899 int arg_num = 0;
2900 if (!_method->is_static()) {
2901 // We shouldn't scalarize 'this' in a value class constructor
2902 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2903 (init || _method->is_scalarized_arg(arg_num))) {
2904 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2905 _sig_cc->insert_before(1, SigEntry(T_OBJECT, 0, nullptr, false, true)); // buffer argument
2906 has_scalarized = true;
2907 _has_inline_recv = true;
2908 _num_inline_args++;
2909 } else {
2910 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2911 }
2912 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2913 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2914 arg_num++;
2915 }
2916 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2917 BasicType bt = ss.type();
2918 if (bt == T_OBJECT) {
2919 InlineKlass* vk = ss.as_inline_klass(holder);
2920 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2921 // Check for a calling convention mismatch with super method(s)
2922 bool scalar_super = false;
2923 bool non_scalar_super = false;
2924 GrowableArray<Method*>* supers = get_supers();
2925 for (int i = 0; i < supers->length(); ++i) {
2926 Method* super_method = supers->at(i);
2927 if (super_method->is_scalarized_arg(arg_num)) {
2928 scalar_super = true;
2929 } else {
2930 non_scalar_super = true;
2931 }
2932 }
2933 #ifdef ASSERT
        // Randomly enable the code paths below for stress testing
2935 bool stress = init && StressCallingConvention;
2936 if (stress && (os::random() & 1) == 1) {
2937 non_scalar_super = true;
2938 if ((os::random() & 1) == 1) {
2939 scalar_super = true;
2940 }
2941 }
2942 #endif
2943 if (non_scalar_super) {
2944 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2945 if (scalar_super) {
            // Found non-scalarized *and* scalarized super methods. We can't handle both.
            // Mark the scalarized super methods as mismatched and recompile their call sites to use the non-scalarized calling convention.
2948 for (int i = 0; i < supers->length(); ++i) {
2949 Method* super_method = supers->at(i);
2950 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2951 JavaThread* thread = JavaThread::current();
2952 HandleMark hm(thread);
2953 methodHandle mh(thread, super_method);
2954 DeoptimizationScope deopt_scope;
2955 {
2956 // Keep the lock scope minimal. Prevent interference with other
2957 // dependency checks by setting mismatch and marking within the lock.
2958 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2959 super_method->set_mismatch();
2960 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2961 }
2962 deopt_scope.deoptimize_marked();
2963 }
2964 }
2965 }
2966 // Fall back to non-scalarized calling convention
2967 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2968 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2969 } else {
2970 _num_inline_args++;
2971 has_scalarized = true;
2972 int last = _sig_cc->length();
2973 int last_ro = _sig_cc_ro->length();
2974 _sig_cc->appendAll(vk->extended_sig());
2975 _sig_cc_ro->appendAll(vk->extended_sig());
2976 // buffer argument
2977 _sig_cc->insert_before(last + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2978 _sig_cc_ro->insert_before(last_ro + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2979 // Insert InlineTypeNode::NullMarker field right after T_METADATA delimiter
2980 _sig_cc->insert_before(last + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2981 _sig_cc_ro->insert_before(last_ro + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2982 }
2983 } else {
2984 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2985 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2986 }
2987 bt = T_OBJECT;
2988 } else {
2989 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2990 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2991 }
2992 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2993 if (bt != T_VOID) {
2994 arg_num++;
2995 }
2996 }
2997 }
2998
2999 // Compute the non-scalarized calling convention
3000 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3001 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3002
3003 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3004 if (has_scalarized && !_method->is_native()) {
3005 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3006 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3007
3008 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3009 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3010
3011 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3012 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3013
    // Enforce an upper bound on stack arguments to avoid hitting the argument
    // limit and bailing out of compilation ("unsupported incoming calling sequence").
    // TODO we need a reasonable limit (flag?) here
3017 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 75) {
3018 return; // Success
3019 }
3020 }
3021
3022 // No scalarized args
3023 _sig_cc = _sig;
3024 _regs_cc = _regs;
3025 _args_on_stack_cc = _args_on_stack;
3026
3027 _sig_cc_ro = _sig;
3028 _regs_cc_ro = _regs;
3029 _args_on_stack_cc_ro = _args_on_stack;
3030 }
3031
3032 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3033 _has_inline_recv = fingerprint->has_ro_adapter();
3034
3035 int value_object_count = 0;
3036 BasicType prev_bt = T_ILLEGAL;
3037 bool has_scalarized_arguments = false;
3038 bool long_prev = false;
3039 int long_prev_offset = -1;
3040 bool skipping_inline_recv = false;
3041 bool receiver_handled = false;
3042
3043 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3044 BasicType bt = arg.bt();
3045 int offset = arg.offset();
3046
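    // In the fingerprint, top-level object arguments are encoded as T_LONG:
    // a genuine long is followed by a T_VOID slot, an object is not. Resolve a
    // pending T_LONG by peeking at the current element (the end-of-signature
    // case is handled after the loop).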
3047 if (long_prev) {
3048 long_prev = false;
3049 BasicType bt_to_add;
3050 if (bt == T_VOID) {
3051 bt_to_add = T_LONG;
3052 } else {
3053 bt_to_add = T_OBJECT;
3054 }
3055 if (value_object_count == 0) {
3056 SigEntry::add_entry(_sig, bt_to_add);
3057 }
3058 assert(long_prev_offset != 0, "no buffer argument here");
3059 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3060 if (!skipping_inline_recv) {
3061 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3062 }
3063 }
3064
3065 switch (bt) {
3066 case T_VOID:
3067 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3068 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3069 value_object_count--;
3070 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3071 if (!skipping_inline_recv) {
3072 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3073 } else if (value_object_count == 0) {
3074 skipping_inline_recv = false;
3075 }
3076 assert(value_object_count >= 0, "invalid value object count");
3077 } else {
        // Nothing to add for _sig: we already added an additional T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3079 }
3080 break;
3081 case T_INT:
3082 case T_FLOAT:
3083 case T_DOUBLE:
3084 if (value_object_count == 0) {
3085 SigEntry::add_entry(_sig, bt);
3086 }
3087 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3088 if (!skipping_inline_recv) {
3089 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3090 }
3091 break;
3092 case T_LONG:
3093 long_prev = true;
3094 long_prev_offset = offset;
3095 break;
3096 case T_BOOLEAN:
3097 case T_CHAR:
3098 case T_BYTE:
3099 case T_SHORT:
3100 case T_OBJECT:
3101 case T_ARRAY:
3102 assert(value_object_count > 0, "must be value object field");
3103 assert(offset != 0 || (bt == T_OBJECT && prev_bt == T_METADATA), "buffer input expected here");
3104 SigEntry::add_entry(_sig_cc, bt, nullptr, offset, offset == -1, offset == 0);
3105 if (!skipping_inline_recv) {
3106 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset, offset == -1, offset == 0);
3107 }
3108 break;
3109 case T_METADATA:
3110 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3111 if (value_object_count == 0) {
3112 SigEntry::add_entry(_sig, T_OBJECT);
3113 }
3114 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3115 if (!skipping_inline_recv) {
3116 if (!receiver_handled && _has_inline_recv && value_object_count == 0) {
3117 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3118 skipping_inline_recv = true;
3119 receiver_handled = true;
3120 } else {
3121 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3122 }
3123 }
3124 value_object_count++;
3125 has_scalarized_arguments = true;
3126 break;
3127 default: {
3128 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3129 }
3130 }
3131 prev_bt = bt;
3132 });
3133
3134 if (long_prev) {
3135 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3136 SigEntry::add_entry(_sig, T_OBJECT);
3137 SigEntry::add_entry(_sig_cc, T_OBJECT);
3138 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3139 }
3140 assert(value_object_count == 0, "invalid value object count");
3141
3142 #ifdef ASSERT
3143 if (_has_inline_recv) {
3144 // In RO signatures, inline receivers must be represented as a single T_OBJECT
3145 assert(_sig_cc_ro->length() >= 1, "sig_cc_ro must include receiver");
3146 assert(_sig_cc_ro->at(0)._bt == T_OBJECT,
3147 "sig_cc_ro must represent inline receiver as T_OBJECT");
3148 assert(_sig_cc_ro->length() <= _sig_cc->length(),
3149 "sig_cc_ro must not be longer than sig_cc");
3150 }
3151 #endif
3152
3153 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3154 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3155
3156 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3157 if (has_scalarized_arguments) {
3158 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3159 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3160
3161 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3162 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3163
3164 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3165 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3166 } else {
3167 // No scalarized args
3168 _sig_cc = _sig;
3169 _regs_cc = _regs;
3170 _args_on_stack_cc = _args_on_stack;
3171
3172 _sig_cc_ro = _sig;
3173 _regs_cc_ro = _regs;
3174 _args_on_stack_cc_ro = _args_on_stack;
3175 }
3176
3177 #ifdef ASSERT
3178 {
3179 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3180 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3181 AdapterFingerPrint::deallocate(compare_fp);
3182 }
3183 #endif
3184 }
3185
3186 const char* AdapterHandlerEntry::_entry_names[] = {
3187 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3188 };
3189
3190 #ifdef ASSERT
3191 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
  // We can only compare code if some was generated; on Zero there is none
3193 #ifndef ZERO
3194 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3195 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3196 assert(comparison_entry->compare_code(cached_entry), "code must match");
3197 // Release the one just created
3198 AdapterHandlerEntry::deallocate(comparison_entry);
#endif // !ZERO
3200 }
#endif // ASSERT
3202
3203 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3204 assert(!method->is_abstract() || InlineTypePassFieldsAsArgs, "abstract methods do not have adapters");
3205 // Use customized signature handler. Need to lock around updates to
3206 // the _adapter_handler_table (it is not safe for concurrent readers
3207 // and a single writer: this could be fixed if it becomes a
3208 // problem).
3209
3210 // Fast-path for trivial adapters
3211 AdapterHandlerEntry* entry = get_simple_adapter(method);
3212 if (entry != nullptr) {
3213 return entry;
3214 }
3215
3216 ResourceMark rm;
3217 bool new_entry = false;
3218
3219 CompiledEntrySignature ces(method());
3220 ces.compute_calling_conventions();
3221 if (ces.has_scalarized_args()) {
3222 if (!method->has_scalarized_args()) {
3223 method->set_has_scalarized_args();
3224 }
3225 if (ces.c1_needs_stack_repair()) {
3226 method->set_c1_needs_stack_repair();
3227 }
3228 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3229 method->set_c2_needs_stack_repair();
3230 }
3231 }
3232
3233 {
3234 MutexLocker mu(AdapterHandlerLibrary_lock);
3235
3236 // Lookup method signature's fingerprint
3237 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3238
3239 if (entry != nullptr) {
3240 #ifndef ZERO
3241 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3242 #endif
3243 #ifdef ASSERT
3244 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3245 verify_adapter_sharing(ces, entry);
3246 }
3247 #endif
3248 } else {
3249 entry = create_adapter(ces, /* allocate_code_blob */ true);
3250 if (entry != nullptr) {
3251 new_entry = true;
3252 }
3253 }
3254 }
3255
3256 // Outside of the lock
3257 if (new_entry) {
3258 post_adapter_creation(entry);
3259 }
3260 return entry;
3261 }
3262
3263 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3264 ResourceMark rm;
3265 const char* name = AdapterHandlerLibrary::name(handler);
3266 const uint32_t id = AdapterHandlerLibrary::id(handler);
3267
3268 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3269 if (blob != nullptr) {
3270 handler->set_adapter_blob(blob->as_adapter_blob());
3271 }
3272 }
3273
3274 #ifndef PRODUCT
3275 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
3276 ttyLocker ttyl;
3277 ResourceMark rm;
3278 int insts_size;
3279 // on Zero the blob may be null
3280 handler->print_adapter_on(tty);
3281 AdapterBlob* adapter_blob = handler->adapter_blob();
3282 if (adapter_blob == nullptr) {
3283 return;
3284 }
3285 insts_size = adapter_blob->code_size();
3286 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3287 handler->fingerprint()->as_basic_args_string(),
3288 handler->fingerprint()->as_string(), insts_size);
3289 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3290 if (Verbose || PrintStubCode) {
3291 address first_pc = adapter_blob->content_begin();
3292 if (first_pc != nullptr) {
3293 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3294 st->cr();
3295 }
3296 }
3297 }
3298 #endif // PRODUCT
3299
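// Convert the absolute entry addresses into offsets relative to the I2C entry
// (which is at offset 0); a missing C2I_No_Clinit_Check entry is encoded as -1.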
3300 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3301 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3302 entry_offset[AdapterBlob::I2C] = 0;
3303 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3304 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3305 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3306 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3307 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3308 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3309 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3310 } else {
3311 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3312 }
3313 }
3314
3315 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3316 CompiledEntrySignature& ces,
3317 bool allocate_code_blob,
3318 bool is_transient) {
3319 if (log_is_enabled(Info, perf, class, link)) {
3320 ClassLoader::perf_method_adapters_count()->inc();
3321 }
3322
3323 #ifndef ZERO
3324 AdapterBlob* adapter_blob = nullptr;
3325 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3326 CodeBuffer buffer(buf);
3327 short buffer_locs[20];
3328 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3329 sizeof(buffer_locs)/sizeof(relocInfo));
3330 MacroAssembler masm(&buffer);
3331 address entry_address[AdapterBlob::ENTRY_COUNT];
3332
3333 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
3334 SharedRuntime::generate_i2c2i_adapters(&masm,
3335 ces.args_on_stack(),
3336 ces.sig(),
3337 ces.regs(),
3338 ces.sig_cc(),
3339 ces.regs_cc(),
3340 ces.sig_cc_ro(),
3341 ces.regs_cc_ro(),
3342 entry_address,
3343 adapter_blob,
3344 allocate_code_blob);
3345
3346 if (ces.has_scalarized_args()) {
3347 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3348 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3349 heap_sig->appendAll(ces.sig_cc());
3350 handler->set_sig_cc(heap_sig);
3351 heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3352 heap_sig->appendAll(ces.sig_cc_ro());
3353 handler->set_sig_cc_ro(heap_sig);
3354 }
  // On Zero there is no code to save, and no need to create a blob
  // or relocate the handler.
3357 int entry_offset[AdapterBlob::ENTRY_COUNT];
3358 address_to_offset(entry_address, entry_offset);
3359 #ifdef ASSERT
3360 if (VerifyAdapterSharing) {
3361 handler->save_code(buf->code_begin(), buffer.insts_size());
3362 if (is_transient) {
3363 return true;
3364 }
3365 }
3366 #endif
3367 if (adapter_blob == nullptr) {
    // CodeCache is full, disable compilation.
    // We ought to log this, but the compile log is only per compile thread
    // and we're some nondescript Java thread.
3371 return false;
3372 }
3373 handler->set_adapter_blob(adapter_blob);
3374 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3375 // try to save generated code
3376 const char* name = AdapterHandlerLibrary::name(handler);
3377 const uint32_t id = AdapterHandlerLibrary::id(handler);
3378 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3379 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3380 }
#endif // !ZERO
3382
3383 #ifndef PRODUCT
3384 // debugging support
3385 if (PrintAdapterHandlers || PrintStubCode) {
3386 print_adapter_handler_info(tty, handler);
3387 }
3388 #endif
3389
3390 return true;
3391 }
3392
3393 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3394 bool allocate_code_blob,
3395 bool is_transient) {
3396 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3397 #ifdef ASSERT
3398 // Verify that we can successfully restore the compiled entry signature object.
3399 CompiledEntrySignature ces_verify;
3400 ces_verify.initialize_from_fingerprint(fp);
3401 #endif
3402 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3403 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3404 AdapterHandlerEntry::deallocate(handler);
3405 return nullptr;
3406 }
3407 if (!is_transient) {
3408 assert_lock_strong(AdapterHandlerLibrary_lock);
3409 _adapter_handler_table->put(fp, handler);
3410 }
3411 return handler;
3412 }
3413
3414 #if INCLUDE_CDS
3415 void AdapterHandlerEntry::remove_unshareable_info() {
3416 #ifdef ASSERT
3417 _saved_code = nullptr;
3418 _saved_code_length = 0;
3419 #endif // ASSERT
3420 _adapter_blob = nullptr;
3421 _linked = false;
3422 _sig_cc = nullptr;
3423 _sig_cc_ro = nullptr;
3424 }
3425
3426 class CopyAdapterTableToArchive : StackObj {
3427 private:
3428 CompactHashtableWriter* _writer;
3429 ArchiveBuilder* _builder;
3430 public:
3431 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
3432 _builder(ArchiveBuilder::current())
3433 {}
3434
3435 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
3436 LogStreamHandle(Trace, aot) lsh;
3437 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3438 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3439 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3440 assert(buffered_fp != nullptr,"sanity check");
3441 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3442 assert(buffered_entry != nullptr,"sanity check");
3443
3444 uint hash = fp->compute_hash();
3445 _writer->add(hash, AOTCompressedPointers::encode_not_null(buffered_entry));
3446 if (lsh.is_enabled()) {
3447 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
3448 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
3449 log_trace(aot)("Added fp=%p (%s), entry=%p to the archived adater table", buffered_fp, buffered_fp->as_basic_args_string(), buffered_entry);
3450 }
3451 } else {
3452 if (lsh.is_enabled()) {
3453 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
3454 }
3455 }
3456 return true;
3457 }
3458 };
3459
3460 void AdapterHandlerLibrary::dump_aot_adapter_table() {
3461 CompactHashtableStats stats;
3462 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
3463 CopyAdapterTableToArchive copy(&writer);
  _adapter_handler_table->iterate(&copy);
3465 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
3466 }
3467
3468 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
3469 _aot_adapter_handler_table.serialize_header(soc);
3470 }
3471
3472 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
3473 #ifdef ASSERT
3474 if (TestAOTAdapterLinkFailure) {
3475 return;
3476 }
3477 #endif
3478 lookup_aot_cache(handler);
3479 #ifndef PRODUCT
3480 // debugging support
3481 if (PrintAdapterHandlers || PrintStubCode) {
3482 print_adapter_handler_info(tty, handler);
3483 }
3484 #endif
3485 }
3486
// This method is used during a production run to link archived adapters (stored in the AOT Cache)
// to their code in the AOT Code Cache
3489 void AdapterHandlerEntry::link() {
3490 ResourceMark rm;
3491 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3492 bool generate_code = false;
3493 // Generate code only if AOTCodeCache is not available, or
3494 // caching adapters is disabled, or we fail to link
3495 // the AdapterHandlerEntry to its code in the AOTCodeCache
3496 if (AOTCodeCache::is_using_adapter()) {
3497 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3498 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3499 if (_adapter_blob == nullptr) {
3500 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3501 generate_code = true;
3502 }
3503
3504 if (get_sig_cc() == nullptr) {
      // The scalarized calling convention signatures are C heap allocated and not archived;
      // they are normally recreated when adapter code is generated. If the adapter code was
      // loaded from the AOT code cache instead, recompute the signatures here from the
      // fingerprint.
3508 CompiledEntrySignature ces;
3509 ces.initialize_from_fingerprint(_fingerprint);
3510 if (ces.has_scalarized_args()) {
3511 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3512 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3513 heap_sig->appendAll(ces.sig_cc());
3514 set_sig_cc(heap_sig);
3515 heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3516 heap_sig->appendAll(ces.sig_cc_ro());
3517 set_sig_cc_ro(heap_sig);
3518 }
3519 }
3520 } else {
3521 generate_code = true;
3522 }
3523 if (generate_code) {
3524 CompiledEntrySignature ces;
3525 ces.initialize_from_fingerprint(_fingerprint);
3526 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3527 // Don't throw exceptions during VM initialization because java.lang.* classes
3528 // might not have been initialized, causing problems when constructing the
3529 // Java exception object.
3530 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3531 }
3532 }
3533 if (_adapter_blob != nullptr) {
3534 post_adapter_creation(this);
3535 }
3536 assert(_linked, "AdapterHandlerEntry must now be linked");
3537 }
3538
3539 void AdapterHandlerLibrary::link_aot_adapters() {
3540 uint max_id = 0;
3541 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
  /* It is possible that some adapters generated in the assembly phase are not stored in the cache.
   * That implies the adapter ids of the adapters in the cache may not be contiguous.
   * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
   * result in collisions of adapter ids between AOT-stored handlers and runtime-generated handlers.
   * To avoid such a situation, initialize the _id_counter with the largest adapter id among the AOT-stored handlers.
   */
3548 _aot_adapter_handler_table.iterate_all([&](AdapterHandlerEntry* entry) {
3549 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3550 entry->link();
3551 max_id = MAX2(max_id, entry->id());
3552 });
3553 // Set adapter id to the maximum id found in the AOTCache
3554 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3555 _id_counter = max_id;
3556 }
3557
// This method is called during a production run to look up the simple adapters
// in the archived adapter handler table
3560 void AdapterHandlerLibrary::lookup_simple_adapters() {
3561 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3562
3563 MutexLocker mu(AdapterHandlerLibrary_lock);
3564 ResourceMark rm;
3565 CompiledEntrySignature no_args;
3566 no_args.compute_calling_conventions();
3567 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3568
3569 CompiledEntrySignature obj_args;
3570 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3571 obj_args.compute_calling_conventions();
3572 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3573
3574 CompiledEntrySignature int_args;
3575 SigEntry::add_entry(int_args.sig(), T_INT);
3576 int_args.compute_calling_conventions();
3577 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3578
3579 CompiledEntrySignature obj_int_args;
3580 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3581 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3582 obj_int_args.compute_calling_conventions();
3583 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3584
3585 CompiledEntrySignature obj_obj_args;
3586 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3587 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3588 obj_obj_args.compute_calling_conventions();
3589 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3590
3591 assert(_no_arg_handler != nullptr &&
3592 _obj_arg_handler != nullptr &&
3593 _int_arg_handler != nullptr &&
3594 _obj_int_arg_handler != nullptr &&
3595 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3596 assert(_no_arg_handler->is_linked() &&
3597 _obj_arg_handler->is_linked() &&
3598 _int_arg_handler->is_linked() &&
3599 _obj_int_arg_handler->is_linked() &&
3600 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3601 }
3602 #endif // INCLUDE_CDS
3603
3604 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3605 LogStreamHandle(Trace, aot) lsh;
3606 if (lsh.is_enabled()) {
3607 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3608 lsh.cr();
3609 }
3610 it->push(&_fingerprint);
3611 }
3612
3613 AdapterHandlerEntry::~AdapterHandlerEntry() {
3614 if (_fingerprint != nullptr) {
3615 AdapterFingerPrint::deallocate(_fingerprint);
3616 _fingerprint = nullptr;
3617 }
3618 if (_sig_cc != nullptr) {
3619 delete _sig_cc;
3620 }
3621 if (_sig_cc_ro != nullptr) {
3622 delete _sig_cc_ro;
3623 }
3624 #ifdef ASSERT
3625 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3626 #endif
3627 FreeHeap(this);
3628 }
3629
3630
3631 #ifdef ASSERT
3632 // Capture the code before relocation so that it can be compared
3633 // against other versions. If the code is captured after relocation
3634 // then relative instructions won't be equivalent.
3635 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3636 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3637 _saved_code_length = length;
3638 memcpy(_saved_code, buffer, length);
3639 }
3640
3641
3642 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3643 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3644
3645 if (other->_saved_code_length != _saved_code_length) {
3646 return false;
3647 }
3648
3649 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3650 }
3651 #endif
3652
3653
3654 /**
3655 * Create a native wrapper for this native method. The wrapper converts the
 * Java-compiled calling convention to the native convention, handles
 * arguments, and transitions to native. On return from native code we
 * transition back to Java, blocking if a safepoint is in progress.
3659 */
3660 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3661 ResourceMark rm;
3662 nmethod* nm = nullptr;
3663
3664 // Check if memory should be freed before allocation
3665 CodeCache::gc_on_allocation();
3666
3667 assert(method->is_native(), "must be native");
3668 assert(method->is_special_native_intrinsic() ||
3669 method->has_native_function(), "must have something valid to call!");
3670
3671 {
3672 // Perform the work while holding the lock, but perform any printing outside the lock
3673 MutexLocker mu(AdapterHandlerLibrary_lock);
3674 // See if somebody beat us to it
3675 if (method->code() != nullptr) {
3676 return;
3677 }
3678
3679 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3680 assert(compile_id > 0, "Must generate native wrapper");
3681
3682
3683 ResourceMark rm;
3684 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3685 if (buf != nullptr) {
3686 CodeBuffer buffer(buf);
3687
3688 if (method->is_continuation_enter_intrinsic()) {
3689 buffer.initialize_stubs_size(192);
3690 }
3691
3692 struct { double data[20]; } locs_buf;
3693 struct { double data[20]; } stubs_locs_buf;
3694 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3695 #if defined(AARCH64) || defined(PPC64)
3696 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3697 // in the constant pool to ensure ordering between the barrier and oops
3698 // accesses. For native_wrappers we need a constant.
3699 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3700 // static java call that is resolved in the runtime.
3701 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3702 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3703 }
3704 #endif
3705 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3706 MacroAssembler _masm(&buffer);
3707
3708 // Fill in the signature array, for the calling-convention call.
3709 const int total_args_passed = method->size_of_parameters();
3710
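      // Use stack-allocated arrays for the common case of at most 16
      // parameters; otherwise fall back to resource-area allocation.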
3711 BasicType stack_sig_bt[16];
3712 VMRegPair stack_regs[16];
3713 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3714 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3715
3716 int i = 0;
3717 if (!method->is_static()) { // Pass in receiver first
3718 sig_bt[i++] = T_OBJECT;
3719 }
3720 SignatureStream ss(method->signature());
3721 for (; !ss.at_return_type(); ss.next()) {
3722 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3723 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3724 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3725 }
3726 }
3727 assert(i == total_args_passed, "");
3728 BasicType ret_type = ss.type();
3729
3730 // Now get the compiled-Java arguments layout.
3731 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3732
3733 // Generate the compiled-to-native wrapper code
3734 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3735
3736 if (nm != nullptr) {
3737 {
3738 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3739 if (nm->make_in_use()) {
3740 method->set_code(method, nm);
3741 }
3742 }
3743
3744 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3745 if (directive->PrintAssemblyOption) {
3746 nm->print_code();
3747 }
3748 DirectivesStack::release(directive);
3749 }
3750 }
3751 } // Unlock AdapterHandlerLibrary_lock
3752
3753
3754 // Install the generated code.
3755 if (nm != nullptr) {
3756 const char *msg = method->is_static() ? "(static)" : "";
3757 CompileTask::print_ul(nm, msg);
3758 if (PrintCompilation) {
3759 ttyLocker ttyl;
3760 CompileTask::print(tty, nm, msg);
3761 }
3762 nm->post_compiled_method_load_event();
3763 }
3764 }
3765
3766 // -------------------------------------------------------------------------
3767 // Java-Java calling convention
3768 // (what you use when Java calls Java)
3769
3770 //------------------------------name_for_receiver----------------------------------
3771 // For a given signature, return the VMReg for parameter 0.
3772 VMReg SharedRuntime::name_for_receiver() {
3773 VMRegPair regs;
3774 BasicType sig_bt = T_OBJECT;
  (void) java_calling_convention(&sig_bt, &regs, 1);
3776 // Return argument 0 register. In the LP64 build pointers
3777 // take 2 registers, but the VM wants only the 'main' name.
3778 return regs.first();
3779 }
3780
3781 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
  // This method returns a data structure allocated as a
  // ResourceObject, so do not put any ResourceMarks in here.
3784
3785 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3786 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3787 int cnt = 0;
3788 if (has_receiver) {
3789 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3790 }
3791
3792 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3793 BasicType type = ss.type();
3794 sig_bt[cnt++] = type;
3795 if (is_double_word_type(type))
3796 sig_bt[cnt++] = T_VOID;
3797 }
3798
3799 if (has_appendix) {
3800 sig_bt[cnt++] = T_OBJECT;
3801 }
3802
3803 assert(cnt < 256, "grow table size");
3804
3805 int comp_args_on_stack;
3806 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3807
3808 // the calling convention doesn't count out_preserve_stack_slots so
3809 // we must add that in to get "true" stack offsets.
3810
3811 if (comp_args_on_stack) {
3812 for (int i = 0; i < cnt; i++) {
3813 VMReg reg1 = regs[i].first();
3814 if (reg1->is_stack()) {
3815 // Yuck
3816 reg1 = reg1->bias(out_preserve_stack_slots());
3817 }
3818 VMReg reg2 = regs[i].second();
3819 if (reg2->is_stack()) {
3820 // Yuck
3821 reg2 = reg2->bias(out_preserve_stack_slots());
3822 }
3823 regs[i].set_pair(reg2, reg1);
3824 }
3825 }
3826
3827 // results
3828 *arg_size = cnt;
3829 return regs;
3830 }
3831
3832 // OSR Migration Code
3833 //
// This code is used to convert interpreter frames into compiled frames. It is
// called from the very start of a compiled OSR nmethod. A temp array is
3836 // allocated to hold the interesting bits of the interpreter frame. All
3837 // active locks are inflated to allow them to move. The displaced headers and
3838 // active interpreter locals are copied into the temp buffer. Then we return
3839 // back to the compiled code. The compiled code then pops the current
3840 // interpreter frame off the stack and pushes a new compiled frame. Then it
3841 // copies the interpreter locals and displaced headers where it wants.
3842 // Finally it calls back to free the temp buffer.
3843 //
3844 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3845
3846 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3847 assert(current == JavaThread::current(), "pre-condition");
3848 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3849 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3850 // frame. The stack watermark code below ensures that the interpreted frame is processed
3851 // before it gets unwound. This is helpful as the size of the compiled frame could be
3852 // larger than the interpreted frame, which could result in the new frame not being
3853 // processed correctly.
3854 StackWatermarkSet::before_unwind(current);
3855
3856 //
  // This code depends on the memory layout of the interpreter local
  // array and the monitors. On all of our platforms the layout is identical,
  // so this code is shared. If some platform lays its arrays out
  // differently, then this code could move to platform-specific code, or
  // the code here could be modified to copy items one at a time using
  // frame accessor methods and be platform-independent.
3863
3864 frame fr = current->last_frame();
3865 assert(fr.is_interpreted_frame(), "");
3866 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3867
3868 // Figure out how many monitors are active.
3869 int active_monitor_count = 0;
3870 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3871 kptr < fr.interpreter_frame_monitor_begin();
3872 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3873 if (kptr->obj() != nullptr) active_monitor_count++;
3874 }
3875
  // QQQ we could place the number of active monitors in the array so that compiled code
  // could double-check it.
3878
3879 Method* moop = fr.interpreter_frame_method();
3880 int max_locals = moop->max_locals();
3881 // Allocate temp buffer, 1 word per local & 2 per active monitor
3882 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3883 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
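  // buf layout: words [0, max_locals) hold the locals (copied below); then,
  // for each active monitor, two words: a BasicLock slot (the cached
  // ObjectMonitor* when UseObjectMonitorTable) followed by the locked object.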
3884
3885 // Copy the locals. Order is preserved so that loading of longs works.
3886 // Since there's no GC I can copy the oops blindly.
3887 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3888 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3889 (HeapWord*)&buf[0],
3890 max_locals);
3891
3892 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3893 int i = max_locals;
3894 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3895 kptr2 < fr.interpreter_frame_monitor_begin();
3896 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3897 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3898 BasicLock *lock = kptr2->lock();
3899 if (UseObjectMonitorTable) {
3900 buf[i] = (intptr_t)lock->object_monitor_cache();
3901 }
3902 #ifdef ASSERT
3903 else {
3904 buf[i] = badDispHeaderOSR;
3905 }
3906 #endif
3907 i++;
3908 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3909 }
3910 }
3911 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3912
3913 RegisterMap map(current,
3914 RegisterMap::UpdateMap::skip,
3915 RegisterMap::ProcessFrames::include,
3916 RegisterMap::WalkContinuation::skip);
3917 frame sender = fr.sender(&map);
3918 if (sender.is_interpreted_frame()) {
3919 current->push_cont_fastpath(sender.unextended_sp());
3920 }
3921
3922 return buf;
3923 JRT_END
3924
3925 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3926 FREE_C_HEAP_ARRAY(intptr_t, buf);
3927 JRT_END
3928
3929 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3930 return handler->fingerprint()->as_basic_args_string();
3931 }
3932
3933 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3934 return handler->id();
3935 }
3936
3937 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3938 bool found = false;
3939 #if INCLUDE_CDS
3940 if (AOTCodeCache::is_using_adapter()) {
3941 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3942 if (b == handler->adapter_blob()) {
3943 found = true;
3944 st->print("Adapter for signature: ");
3945 handler->print_adapter_on(st);
3946 return false; // abort iteration
3947 } else {
3948 return true; // keep looking
3949 }
3950 };
3951 _aot_adapter_handler_table.iterate(findblob_archived_table);
3952 }
3953 #endif // INCLUDE_CDS
3954 if (!found) {
3955 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* handler) {
3956 if (b == handler->adapter_blob()) {
3957 found = true;
3958 st->print("Adapter for signature: ");
3959 handler->print_adapter_on(st);
3960 return false; // abort iteration
3961 } else {
3962 return true; // keep looking
3963 }
3964 };
3965 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3966 _adapter_handler_table->iterate(findblob_runtime_table);
3967 }
3968 assert(found, "Should have found handler");
3969 }
3970
3971 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3972 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3973 if (adapter_blob() != nullptr) {
3974 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3975 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3976 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3977 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3978 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3979 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3980 if (get_c2i_no_clinit_check_entry() != nullptr) {
3981 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3982 }
3983 }
3984 st->cr();
3985 }
3986
3987 #ifndef PRODUCT
3988
3989 void AdapterHandlerLibrary::print_statistics() {
3990 print_table_statistics();
3991 }
3992
3993 #endif /* PRODUCT */
3994
3995 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3996 assert(current == JavaThread::current(), "pre-condition");
3997 StackOverflow* overflow_state = current->stack_overflow_state();
3998 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3999 overflow_state->set_reserved_stack_activation(current->stack_base());
4000 JRT_END
4001
4002 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
4003 ResourceMark rm(current);
4004 frame activation;
4005 nmethod* nm = nullptr;
4006 int count = 1;
4007
4008 assert(fr.is_java_frame(), "Must start on Java frame");
4009
4010 RegisterMap map(JavaThread::current(),
4011 RegisterMap::UpdateMap::skip,
4012 RegisterMap::ProcessFrames::skip,
4013 RegisterMap::WalkContinuation::skip); // don't walk continuations
4014 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
4015 if (!fr.is_java_frame()) {
4016 continue;
4017 }
4018
4019 Method* method = nullptr;
4020 bool found = false;
4021 if (fr.is_interpreted_frame()) {
4022 method = fr.interpreter_frame_method();
4023 if (method != nullptr && method->has_reserved_stack_access()) {
4024 found = true;
4025 }
4026 } else {
4027 CodeBlob* cb = fr.cb();
4028 if (cb != nullptr && cb->is_nmethod()) {
4029 nm = cb->as_nmethod();
4030 method = nm->method();
4031 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
4032 method = sd->method();
4033 if (method != nullptr && method->has_reserved_stack_access()) {
4034 found = true;
4035 }
4036 }
4037 }
4038 }
4039 if (found) {
4040 activation = fr;
4041 warning("Potentially dangerous stack overflow in "
4042 "ReservedStackAccess annotated method %s [%d]",
4043 method->name_and_sig_as_C_string(), count++);
4044 EventReservedStackActivation event;
4045 if (event.should_commit()) {
4046 event.set_method(method);
4047 event.commit();
4048 }
4049 }
4050 }
4051 return activation;
4052 }
4053
4054 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4055 // After any safepoint, just before going back to compiled code,
4056 // we inform the GC that we will be doing initializing writes to
  // this object in the future without emitting card-marks, so that
  // the GC may take any compensating steps.
4059
4060 oop new_obj = current->vm_result_oop();
4061 if (new_obj == nullptr) return;
4062
4063 BarrierSet *bs = BarrierSet::barrier_set();
4064 bs->on_slowpath_allocation_exit(current, new_obj);
4065 }
4066
// We are at a call from compiled code to the interpreter. We need backing
4068 // buffers for all inline type arguments. Allocate an object array to
4069 // hold them (convenient because once we're done with it we don't have
4070 // to worry about freeing it).
4071 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, bool from_c1, TRAPS) {
4072 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4073 ResourceMark rm;
4074
4075 // Retrieve arguments passed at the call
4076 RegisterMap reg_map2(THREAD,
4077 RegisterMap::UpdateMap::include,
4078 RegisterMap::ProcessFrames::include,
4079 RegisterMap::WalkContinuation::skip);
4080 frame stubFrame = THREAD->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map2);
4082 if (from_c1) {
    callerFrame = callerFrame.sender(&reg_map2);
4084 }
4085 int arg_size;
4086 const GrowableArray<SigEntry>* sig = allocate_receiver ? callee->adapter()->get_sig_cc() : callee->adapter()->get_sig_cc_ro();
4087 assert(sig != nullptr, "sig should never be null");
4088 TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
4089 VMRegPair* reg_pairs = find_callee_arguments(tmp_sig, false, false, &arg_size);
4090
4091 int nb_slots = 0;
4092 InstanceKlass* holder = callee->method_holder();
4093 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4094 if (allocate_receiver) {
4095 nb_slots++;
4096 }
4097 int arg_num = callee->is_static() ? 0 : 1;
4098 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4099 BasicType bt = ss.type();
4100 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4101 nb_slots++;
4102 }
4103 if (bt != T_VOID) {
4104 arg_num++;
4105 }
4106 }
4107 objArrayOop array_oop = nullptr;
4108 objArrayHandle array;
4109 arg_num = callee->is_static() ? 0 : 1;
4110 int i = 0;
4111 uint pos = 0;
4112 uint depth = 0;
4113 uint ignored = 0;
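  // Walk the scalarized signature in lock-step with the register array:
  // 'pos' indexes sig including its T_METADATA/T_VOID delimiters, 'ignored'
  // counts delimiter entries (which have no register assigned) so that
  // 'pos - ignored' indexes reg_pairs, and 'depth' tracks the nesting depth
  // of scalarized value objects.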
4114 if (allocate_receiver) {
4115 assert(sig->at(pos)._bt == T_METADATA, "scalarized value expected");
4116 pos++;
4117 ignored++;
4118 depth++;
4119 assert(sig->at(pos)._bt == T_OBJECT, "buffer argument");
4120 uint reg_pos = 0;
4121 assert(reg_pos < (uint)arg_size, "");
4122 VMRegPair reg_pair = reg_pairs[reg_pos];
    oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), &reg_map2);
4124 instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
4125 InlineKlass* vk = InlineKlass::cast(holder);
4126 if (h_buffer.not_null()) {
4127 assert(h_buffer->klass() == vk, "buffer not of expected class");
4128 } else {
4129 // Only allocate if buffer passed at the call is null
4130 if (array_oop == nullptr) {
4131 array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4132 array = objArrayHandle(THREAD, array_oop);
4133 }
4134 oop res = vk->allocate_instance(CHECK_NULL);
4135 array->obj_at_put(i, res);
4136 }
4137 i++;
4138 }
4139 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4140 BasicType bt = ss.type();
4141 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4142 while (true) {
4143 BasicType bt = sig->at(pos)._bt;
4144 if (bt == T_METADATA) {
4145 depth++;
4146 ignored++;
4147 if (depth == 1) {
4148 break;
4149 }
4150 } else if (bt == T_VOID && sig->at(pos - 1)._bt != T_LONG && sig->at(pos - 1)._bt != T_DOUBLE) {
4151 ignored++;
4152 depth--;
4153 }
4154 pos++;
4155 }
4156 pos++;
4157 assert(sig->at(pos)._bt == T_OBJECT, "buffer argument expected");
4158 uint reg_pos = pos - ignored;
4159 assert(reg_pos < (uint)arg_size, "out of bound register?");
4160 VMRegPair reg_pair = reg_pairs[reg_pos];
      oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), &reg_map2);
4162 instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
4163 InlineKlass* vk = ss.as_inline_klass(holder);
4164 assert(vk != nullptr, "Unexpected klass");
4165 if (h_buffer.not_null()) {
4166 assert(h_buffer->klass() == vk, "buffer not of expected class");
4167 } else {
4168 // Only allocate if buffer passed at the call is null
4169 if (array_oop == nullptr) {
4170 array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4171 array = objArrayHandle(THREAD, array_oop);
4172 }
4173 oop res = vk->allocate_instance(CHECK_NULL);
4174 array->obj_at_put(i, res);
4175 }
4176 i++;
4177 }
4178 if (bt != T_VOID) {
4179 arg_num++;
4180 }
4181 }
4182 return array();
4183 }
4184
4185 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4186 methodHandle callee(current, callee_method);
4187 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, false, CHECK);
4188 current->set_vm_result_oop(array);
4189 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4190 JRT_END
4191
4192 // We're returning from an interpreted method: load each field into a
4193 // register following the calling convention
4194 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4195 {
4196 assert(res->klass()->is_inline_klass(), "only inline types here");
4197 ResourceMark rm;
4198 RegisterMap reg_map(current,
4199 RegisterMap::UpdateMap::include,
4200 RegisterMap::ProcessFrames::include,
4201 RegisterMap::WalkContinuation::skip);
4202 frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);
4204 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4205
4206 InlineKlass* vk = InlineKlass::cast(res->klass());
4207
4208 const Array<SigEntry>* sig_vk = vk->extended_sig();
4209 const Array<VMRegPair>* regs = vk->return_regs();
4210
4211 if (regs == nullptr) {
4212 // The fields of the inline klass don't fit in registers, bail out
4213 return;
4214 }
4215
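  // regs->at(0) is reserved for the oop of the inline type itself (checked by
  // the assert after this loop); the flattened field values start at index 1.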
4216 int j = 1;
4217 for (int i = 0; i < sig_vk->length(); i++) {
4218 BasicType bt = sig_vk->at(i)._bt;
4219 if (bt == T_METADATA) {
4220 continue;
4221 }
4222 if (bt == T_VOID) {
4223 if (sig_vk->at(i-1)._bt == T_LONG ||
4224 sig_vk->at(i-1)._bt == T_DOUBLE) {
4225 j++;
4226 }
4227 continue;
4228 }
4229 int off = sig_vk->at(i)._offset;
4230 assert(off > 0, "offset in object should be positive");
4231 VMRegPair pair = regs->at(j);
4232 address loc = reg_map.location(pair.first(), nullptr);
4233 guarantee(loc != nullptr, "bad register save location");
4234 switch(bt) {
4235 case T_BOOLEAN:
4236 *(jboolean*)loc = res->bool_field(off);
4237 break;
4238 case T_CHAR:
4239 *(jchar*)loc = res->char_field(off);
4240 break;
4241 case T_BYTE:
4242 *(jbyte*)loc = res->byte_field(off);
4243 break;
4244 case T_SHORT:
4245 *(jshort*)loc = res->short_field(off);
4246 break;
4247 case T_INT: {
4248 *(jint*)loc = res->int_field(off);
4249 break;
4250 }
4251 case T_LONG:
4252 #ifdef _LP64
4253 *(intptr_t*)loc = res->long_field(off);
4254 #else
4255 Unimplemented();
4256 #endif
4257 break;
4258 case T_OBJECT:
4259 case T_ARRAY: {
4260 *(oop*)loc = res->obj_field(off);
4261 break;
4262 }
4263 case T_FLOAT:
4264 *(jfloat*)loc = res->float_field(off);
4265 break;
4266 case T_DOUBLE:
4267 *(jdouble*)loc = res->double_field(off);
4268 break;
4269 default:
4270 ShouldNotReachHere();
4271 }
4272 j++;
4273 }
4274 assert(j == regs->length(), "missed a field?");
4275
4276 #ifdef ASSERT
4277 VMRegPair pair = regs->at(0);
4278 address loc = reg_map.location(pair.first(), nullptr);
4279 assert(*(oopDesc**)loc == res, "overwritten object");
4280 #endif
4281
4282 current->set_vm_result_oop(res);
4283 }
4284 JRT_END
4285
// We've returned to an interpreted method; the interpreter needs a
// reference to an inline type instance. Allocate it and initialize it
// from the field values in registers.
4289 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4290 {
4291 ResourceMark rm;
4292 RegisterMap reg_map(current,
4293 RegisterMap::UpdateMap::include,
4294 RegisterMap::ProcessFrames::include,
4295 RegisterMap::WalkContinuation::skip);
4296 frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);
4298
4299 #ifdef ASSERT
4300 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4301 #endif
4302
4303 if (!is_set_nth_bit(res, 0)) {
4304 // We're not returning with inline type fields in registers (the
4305 // calling convention didn't allow it for this inline klass)
4306 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4307 current->set_vm_result_oop((oopDesc*)res);
4308 assert(verif_vk == nullptr, "broken calling convention");
4309 return;
4310 }
4311
4312 clear_nth_bit(res, 0);
4313 InlineKlass* vk = (InlineKlass*)res;
4314 assert(verif_vk == vk, "broken calling convention");
4315 assert(Metaspace::contains((void*)res), "should be klass");
4316
4317 // Allocate handles for every oop field so they are safe in case of
4318 // a safepoint when allocating
4319 GrowableArray<Handle> handles;
4320 vk->save_oop_fields(reg_map, handles);
4321
4322 // It's unsafe to safepoint until we are here
4323 JRT_BLOCK;
4324 {
4325 JavaThread* THREAD = current;
4326 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4327 current->set_vm_result_oop(vt);
4328 }
4329 JRT_BLOCK_END;
4330 }
4331 JRT_END