/*
 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayProperties.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubInfo.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of Runtime1
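// One generated blob per C1 stub, indexed by StubInfo::c1_offset(id)
// (see generate_blob_for() and blob_for() below).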
CodeBlob* Runtime1::_blobs[StubInfo::C1_STUB_COUNT];

#ifndef PRODUCT
// statistics
uint Runtime1::_generic_arraycopystub_cnt = 0;
uint Runtime1::_arraycopy_slowcase_cnt = 0;
uint Runtime1::_arraycopy_checkcast_cnt = 0;
uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
uint Runtime1::_new_type_array_slowcase_cnt = 0;
uint Runtime1::_new_object_array_slowcase_cnt = 0;
uint Runtime1::_new_null_free_array_slowcase_cnt = 0;
uint Runtime1::_new_instance_slowcase_cnt = 0;
uint Runtime1::_new_multi_array_slowcase_cnt = 0;
uint Runtime1::_load_flat_array_slowcase_cnt = 0;
uint Runtime1::_store_flat_array_slowcase_cnt = 0;
uint Runtime1::_substitutability_check_slowcase_cnt = 0;
uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
uint Runtime1::_monitorenter_slowcase_cnt = 0;
uint Runtime1::_monitorexit_slowcase_cnt = 0;
uint Runtime1::_patch_code_slowcase_cnt = 0;
uint Runtime1::_throw_range_check_exception_count = 0;
uint Runtime1::_throw_index_exception_count = 0;
uint Runtime1::_throw_div0_exception_count = 0;
uint Runtime1::_throw_null_pointer_exception_count = 0;
uint Runtime1::_throw_class_cast_exception_count = 0;
uint Runtime1::_throw_incompatible_class_change_error_count = 0;
uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
uint Runtime1::_throw_identity_exception_count = 0;
uint Runtime1::_throw_count = 0;

static uint _byte_arraycopy_stub_cnt = 0;
static uint _short_arraycopy_stub_cnt = 0;
static uint _int_arraycopy_stub_cnt = 0;
static uint _long_arraycopy_stub_cnt = 0;
static uint _oop_arraycopy_stub_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted(JavaThread* current) {
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}
181
182 // Stress deoptimization
183 static void deopt_caller(JavaThread* current) {
184 if (!caller_is_deopted(current)) {
185 RegisterMap reg_map(current,
186 RegisterMap::UpdateMap::skip,
187 RegisterMap::ProcessFrames::include,
188 RegisterMap::WalkContinuation::skip);
189 frame runtime_frame = current->last_frame();
190 frame caller_frame = runtime_frame.sender(®_map);
191 Deoptimization::deoptimize_frame(current, caller_frame.id());
192 assert(caller_is_deopted(current), "Must be deoptimized");
193 }
194 }

class C1StubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  StubId _id;
 public:
  C1StubAssemblerCodeGenClosure(StubId id) : _id(id) {
    assert(StubInfo::is_c1(_id), "not a c1 stub id %s", StubInfo::name(_id));
  }
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};

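// Generate a single runtime stub blob, first trying to load a previously
// stored copy from the AOT code cache. Returns nullptr if blob allocation
// fails (the failure is kept non-fatal here; callers bail out instead).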
CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
  if (id != StubId::NO_STUBID) {
    CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C1Blob, StubInfo::blob(id));
    if (blob != nullptr) {
      return blob;
    }
  }

  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
  // generate code for runtime stub
  oop_maps = cl->generate_code(sasm);
  assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
  assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap");

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments,
                                                 false /* alloc_fail_is_fatal */ );
  if (blob != nullptr && (int)id >= 0) {
    AOTCodeCache::store_code_blob(*blob, AOTCodeEntry::C1Blob, StubInfo::blob(id));
  }
  return blob;
}

bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubId id) {
  assert(StubInfo::is_c1(id), "not a c1 stub %s", StubInfo::name(id));
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
  // These stubs don't need to have an oopmap
  case StubId::c1_dtrace_object_alloc_id:
  case StubId::c1_slow_subtype_check_id:
  case StubId::c1_fpu2long_stub_id:
  case StubId::c1_unwind_exception_id:
  case StubId::c1_counter_overflow_id:
  case StubId::c1_is_instance_of_id:
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  C1StubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  int idx = StubInfo::c1_offset(id); // will assert on non-c1 id
  _blobs[idx] = blob;
  return blob != nullptr;
}

bool Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // iterate blobs in C1 group and generate a single stub per blob
  StubId id = StubInfo::stub_base(StubGroup::C1);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    if (!generate_blob_for(blob, id)) {
      return false;
    }
    if (id == StubId::c1_forward_exception_id) {
      // publish early c1 stubs at this point so later stubs can refer to them
      AOTCodeCache::init_early_c1_table();
    }
  }
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    id = StubInfo::stub_base(StubGroup::C1);
    for (; id != limit; id = StubInfo::next(id)) {
      CodeBlob* blob = blob_for(id);
      blob->print();
      if (blob->oop_maps() != nullptr) {
        blob->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  return bs->generate_c1_runtime_stubs(blob);
}

CodeBlob* Runtime1::blob_for(StubId id) {
  int idx = StubInfo::c1_offset(id); // will assert on non-c1 id
  return _blobs[idx];
}


const char* Runtime1::name_for(StubId id) {
  return StubInfo::name(id);
}

const char* Runtime1::name_for_address(address entry) {
  // iterate stubs starting from C1 group base
  StubId id = StubInfo::stub_base(StubGroup::C1);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    if (entry == entry_for(id)) return StubInfo::name(id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JfrTime::time_function());
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());
  FUNCTION_CASE(entry, StubRoutines::dsinh());
  FUNCTION_CASE(entry, StubRoutines::dtanh());
  FUNCTION_CASE(entry, StubRoutines::dcbrt());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}
static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {
#ifndef PRODUCT
  if (PrintC1Statistics) {
    Runtime1::_new_instance_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result_oop(obj);
}

JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  allocate_instance(current, klass, CHECK);
JRT_END

JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  // anymore after new_typeArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result_oop(obj);
  // This is pretty rare, but deoptimizing at this runtime patch point is
  // stressful to the deoptimization machinery, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END


JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  // anymore after new_objArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result_oop(obj);
  // This is pretty rare, but deoptimizing at this runtime patch point is
  // stressful to the deoptimization machinery, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_null_free_array(JavaThread* current, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_null_free_array_slowcase_cnt++;)
  // TODO 8350865 This is dead code since 8325660 because null-free arrays can only be created
  // via the factory methods that are not yet implemented in C1. Should probably be fixed by 8265122.

  // Note: no handle for klass needed since they are not used
  // anymore after new_objArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  assert(elem_klass->is_inline_klass(), "must be");
  // Logically creates elements, ensure klass init
  elem_klass->initialize(CHECK);

  const ArrayProperties props = ArrayProperties::Default().with_null_restricted();
  arrayOop obj = oopFactory::new_objArray(elem_klass, length, props, CHECK);

  current->set_vm_result_oop(obj);
  // This is pretty rare, but deoptimizing at this runtime patch point is
  // stressful to the deoptimization machinery, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result_oop(obj);
JRT_END


static void profile_flat_array(JavaThread* current, bool load, bool null_free) {
  ResourceMark rm(current);
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  // Check if array access profiling is enabled
  if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
    return;
  }
  int bci = vfst.bci();
  Method* method = vfst.method();
  MethodData* md = method->method_data();
  if (md != nullptr) {
    // Lock to access ProfileData, and ensure lock is not broken by a safepoint
    MutexLocker ml(md->extra_data_lock(), Mutex::_no_safepoint_check_flag);

    ProfileData* data = md->bci_to_data(bci);
    assert(data != nullptr, "incorrect profiling entry");
    if (data->is_ArrayLoadData()) {
      assert(load, "should be an array load");
      ArrayLoadData* load_data = (ArrayLoadData*) data;
      load_data->set_flat_array();
      if (null_free) {
        load_data->set_null_free_array();
      }
    } else {
      assert(data->is_ArrayStoreData(), "");
      assert(!load, "should be an array store");
      ArrayStoreData* store_data = (ArrayStoreData*) data;
      store_data->set_flat_array();
      if (null_free) {
        store_data->set_null_free_array();
      }
    }
  }
}

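// Slow-path element load from a flat array: the element is buffered as a
// heap instance and handed back through the thread-local vm_result oop.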
JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
  assert(array->klass()->is_flatArray_klass(), "should not be called");
  profile_flat_array(current, true, array->is_null_free_array());

  NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
  assert(array->length() > 0 && index < array->length(), "already checked");
  flatArrayHandle vah(current, array);
  oop obj = array->obj_at(index, CHECK);
  current->set_vm_result_oop(obj);
JRT_END

JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
  // TODO 8350865 We can call here with a non-flat array because of LIR_Assembler::emit_opFlattenedArrayCheck
  if (array->klass()->is_flatArray_klass()) {
    profile_flat_array(current, false, array->is_null_free_array());
  }

  NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
  if (value == nullptr && array->is_null_free_array()) {
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
  } else {
    assert(array->klass()->is_flatArray_klass(), "should not be called");
    array->obj_at_put(index, value, CHECK);
  }
JRT_END

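// Upcall into Java (ValueObjectMethods::isSubstitutable) to decide value
// object substitutability for acmp; returns 1 if the two operands are
// substitutable, 0 otherwise.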
JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
  NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
  JavaCallArguments args;
  args.push_oop(Handle(THREAD, left));
  args.push_oop(Handle(THREAD, right));
  JavaValue result(T_BOOLEAN);
  JavaCalls::call_static(&result,
                         vmClasses::ValueObjectMethods_klass(),
                         vmSymbols::isSubstitutable_name(),
                         vmSymbols::object_object_boolean_signature(),
                         &args, CHECK_0);
  return result.get_jboolean() ? 1 : 0;
JRT_END


extern "C" void ps();

void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
  JavaThread* THREAD = current;
  methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
  oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
  current->set_vm_result_oop(obj);
}

JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, true);
JRT_END

JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, false);
JRT_END

JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee method (which may have been inlined into
// the enclosing method) is passed as an argument; to make that possible it is embedded in the code
// as a constant.
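//
// For example (a sketch): for a backedge such as
//
//   10: if_icmplt 4      // branch_bci == 10, signed 16-bit offset == -6
//
// the destination bci computed below is 10 + (-6) == 4, i.e. the loop
// header that an OSR compilation would target.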
static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
  nmethod* osr_nm = nullptr;
  methodHandle method(current, m);

  RegisterMap map(current,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != nullptr && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(current, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK_NO_ASYNC
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != nullptr) {
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return nullptr;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from exception stub (via the entry method below)
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
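//
// Rough control flow (a sketch):
//
//   compiled code --(throw)--> exception stub
//     --> Runtime1::exception_handler_for_pc      (still in Java mode)
//       --> exception_handler_for_pc_helper       (VM transition, below)
//     <-- handler pc, or the deopt blob's unpack_with_exception entry
//         if the calling nmethod was deoptimized in the meantime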
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  MACOS_AARCH64_ONLY(current->wx_enable_write());
  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not yet safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) {
    // The StubId::c1_handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::include,
                    RegisterMap::WalkContinuation::skip);
    frame exception_frame = current->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
  assert(exception.not_null(), "null exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable
  assert(exception->is_a(vmClasses::Throwable_klass()),
         "Exception not subclass of Throwable");

  // debugging support
  // tracing
  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm; // print_value_string
    stringStream tempst;
    assert(nm->method() != nullptr, "Unexpected null method()");
    tempst.print("C1 compiled method <%s>\n"
                 " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                 nm->method()->print_value_string(), p2i(pc), p2i(current));
    Exceptions::log_exception(exception, tempst.freeze());
  }
  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

  // Check the stack guard pages and re-enable them if necessary and there is
  // enough space on the stack to do so. Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here. If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site. This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame stub_frame = current->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != nullptr) {
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method. Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation as null).
  address continuation = nullptr;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    current->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    current->set_exception_oop(exception());
    current->set_exception_pc(pc);

    // the exception cache is used only by non-implicit exceptions
    // Update the exception cache only when there didn't happen
    // another exception during the computation of the compiled
    // exception handler. Checking for exception oop equality is not
    // sufficient because some exceptions are pre-allocated and reused.
    if (continuation != nullptr && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  current->set_vm_result_oop(exception());

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(current), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  nmethod* nm = nullptr;
  address continuation = nullptr;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
  }
  // Back in Java; use no oops and DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != nullptr && caller_is_deopted(current)) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != nullptr, "no handler found");
  return continuation;
}


JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_range_check_exception_count++;
  }
#endif
  const int len = 35;
  assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
  char message[2 * jintAsStringSize + len];
  os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_index_exception_count++;
  }
#endif
  char message[16];
  os::snprintf_checked(message, sizeof(message), "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_div0_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_null_pointer_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_class_cast_exception_count++;
  }
#endif
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
  NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END

JRT_ENTRY(void, Runtime1::throw_identity_exception(JavaThread* current, oopDesc* object))
  NOT_PRODUCT(_throw_identity_exception_count++;)
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_identity_exception_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IdentityException(), message);
JRT_END

JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  assert(obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
  assert(current == JavaThread::current(), "pre-condition");
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorexit_slowcase_cnt++;
  }
#endif
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be null or an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != nullptr, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != nullptr) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING
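
// On platforms that define DEOPTIMIZE_WHEN_PATCHING, unresolved patch sites
// are handled by deoptimizing instead of patching code in place; the
// patching scheme below covers the remaining platforms.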

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated. It handles
// references to classes, fields and forcing of initialization. Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code. The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//     ;; patch body
//     movl <const>, reg                (for class constants)
//     <or> movl [reg1 + <const>], reg  (for field offsets)
//     <or> movl reg, [reg1 + <const>]  (for field offsets)
//     <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way. The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized. getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it. This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class. It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub. Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below. The
// initializing class will continue on its way. Once the class is
// fully_initialized, the initializing_thread of the class becomes
// null, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.

// NB:
//
// Patchable instruction sequences inherently exhibit race conditions,
// where thread A is patching an instruction at the same time thread B
// is executing it. The algorithms we use ensure that any observation
// that B can make on any intermediate states during A's patching will
// always end up with a correct outcome. This is easiest if there are
// few or no intermediate states. (Some inline caches have two
// related instructions that must be patched in tandem. For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it. We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written. We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.

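// Slow path shared by the three C1 patching stubs (access_field_patching,
// load_klass/mirror_patching and load_appendix_patching): resolve the
// missing constant, then either deoptimize the caller or patch the
// compiled code in place (see the copy-back logic further below).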
JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, StubId stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be the same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  bool deoptimize_for_null_free = false;
  bool deoptimize_for_flat = false;
  bool deoptimize_for_strict_static = false;
  int patch_field_offset = -1;
  Klass* init_klass = nullptr; // klass needed by load_klass_patching code
  Klass* load_klass = nullptr; // klass needed by load_klass_patching code
  Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
  Handle appendix(current, nullptr); // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id);

  if (stub_id == StubId::c1_access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time
    // it must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference. The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets. load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

    // The field we are patching is null-free. Deoptimize and regenerate
    // the compiled code if we patch a putfield/putstatic because it
    // does not contain the required null check.
    deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());

    // The field we are patching is flat. Deoptimize and regenerate
    // the compiled code which can't handle the layout of the flat
    // field because it was unknown at compile time.
    deoptimize_for_flat = result.is_flat();

    // Strict statics may require tracking if their class is not fully initialized.
    // For now we can bail out of the compiler and let the interpreter handle it.
    deoptimize_for_strict_static = result.is_strict_static_unset();
  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = nullptr;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(current, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
          if (!k->is_typeArray_klass() && !k->is_refArray_klass() && !k->is_flatArray_klass()) {
            k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayProperties::Default(), THREAD);
          }
          if (k->is_flatArray_klass()) {
            deoptimize_for_flat = true;
          }
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(current, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == StubId::c1_load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(current, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
        appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
        break;
      }
      case Bytecodes::_invokedynamic: {
        appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile ||
      deoptimize_for_atomic ||
      deoptimize_for_null_free ||
      deoptimize_for_flat ||
      deoptimize_for_strict_static) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
      if (deoptimize_for_null_free) {
        tty->print_cr("Deoptimizing for patching null-free field reference");
      }
      if (deoptimize_for_flat) {
        tty->print_cr("Deoptimizing for patching flat field or array reference");
      }
      if (deoptimize_for_strict_static) {
        tty->print_cr("Deoptimizing for patching strict static field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant(nmethod::InvalidationReason::C1_CODEPATCH);
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching we just return
    // and let the deopt happen
    if (!caller_is_deopted(current)) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

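        // The patch-info bytes emitted by PatchingStub live at fixed
        // negative offsets from the patch-info address (a sketch of the
        // layout recovered just below):
        //
        //   stub_location - 1 : number of instruction bytes to copy
        //   stub_location - 2 : number of bytes to skip
        //   stub_location - 3 : offset back to the being_initialized entry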
1320 address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
1321 unsigned char* byte_count = (unsigned char*) (stub_location - 1);
1322 unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
1323 unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
1324 address copy_buff = stub_location - *byte_skip - *byte_count;
1325 address being_initialized_entry = stub_location - *being_initialized_entry_offset;
1326 if (TracePatching) {
1327 ttyLocker ttyl;
1328 tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci,
1329 p2i(instr_pc), (stub_id == StubId::c1_access_field_patching_id) ? "field" : "klass");
1330 nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
1331 assert(caller_code != nullptr, "nmethod not found");
1332
1333 // NOTE we use pc() not original_pc() because we already know they are
1334 // identical otherwise we'd have never entered this block of code
1335
1336 const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
1337 assert(map != nullptr, "null check");
1338 map->print();
1339 tty->cr();
1340
1341 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1342 }
        // do_patch, set by the code below, says whether to copy the patch
        // body back into the nmethod.
        bool do_patch = true;
        if (stub_id == StubId::c1_access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete. In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass)->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch, "initialization must be complete at this point");
          } else {
            // Patch the instruction <move reg, klass>.
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == StubId::c1_load_klass_patching_id) {
              assert(load_klass != nullptr, "klass not set");
              n_copy->set_data((intx) (load_klass));
            } else {
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == StubId::c1_load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

        if (do_patch) {
          // Replace instructions: first replace the tail, then the call.
#ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
               stub_id == StubId::c1_load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = nullptr;
            assert(nm != nullptr, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == StubId::c1_load_mirror_patching_id ||
                       stub_id == StubId::c1_load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == StubId::c1_load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != nullptr, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

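          // Copy the saved instruction bytes back, skipping the leading
          // jump-sized chunk: that header is installed last and atomically
          // by replace_mt_safe() below, so racing threads either still see
          // the jump to the patching stub or the fully patched instruction.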
          for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == StubId::c1_load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == StubId::c1_load_klass_patching_id) ? relocInfo::metadata_type
                                                             : relocInfo::oop_type;
            // Update the relocInfo at the patch site.
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != nullptr, "invalid nmethod_pc");

            // The old patch site is now a move instruction, so update the
            // reloc info so that the embedded oop/metadata will be visited
            // during future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
          }

        } else {
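          // Patching is deferred: redirect the patch site to the
          // being-initialized entry so that threads other than the
          // initializing one re-enter the VM and block until class
          // initialization completes.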
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
    // If we are patching in a non-perm oop, make sure the nmethod
    // is on the right list.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != nullptr, "only nmethods can contain non-perm oops");

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

#else // DEOPTIMIZE_WHEN_PATCHING

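// Decide whether patching (and hence invalidating the caller nmethod) is
// needed at all: if the klass referenced by the bytecode is already recorded
// as a resolution error, re-executing the bytecode in the interpreter will
// simply throw that error, so the nmethod can stay entrant.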
static bool is_patching_needed(JavaThread* current, StubId stub_id) {
  if (stub_id == StubId::c1_load_klass_patching_id ||
      stub_id == StubId::c1_load_mirror_patching_id) {
    // last Java frame on stack
    vframeStream vfst(current, true);
    assert(!vfst.at_end(), "Java frame must exist");

    methodHandle caller_method(current, vfst.method());
    int bci = vfst.bci();
    Bytecodes::Code code = caller_method()->java_code_at(bci);

    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast: {
        Bytecode bc(caller_method(), caller_method->bcp_from(bci));
        constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
        if (tag.is_unresolved_klass_in_error()) {
          return false; // throws resolution error
        }
        break;
      }

      default: break;
    }
  }
  return true;
}

void Runtime1::patch_code(JavaThread* current, StubId stub_id) {
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  // Enable WXWrite: this function is called by a C1 stub as a runtime
  // function (see the other implementation above).
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  if (TracePatching) {
    tty->print_cr("Deoptimizing because a patch is needed");
  }

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);

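  // The topmost frame is the runtime stub; its sender is the compiled
  // frame that contains the patch site.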
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "Wrong frame type");

  if (is_patching_needed(current, stub_id)) {
    // Make sure the nmethod is invalidated, i.e. made not entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
    }
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
  postcond(caller_is_deopted(current));
}

#endif // DEOPTIMIZE_WHEN_PATCHING

// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  DEBUG_ONLY(NoHandleMark nhm;)
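  // NoHandleMark asserts that no handles are created while this thread is
  // still in Java mode; the ResetNoHandleMark below lifts that restriction
  // for the duration of the VM transition inside patch_code().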
  {
    // Enter VM mode.
    ResetNoHandleMark rnhm;
    patch_code(current, StubId::c1_load_klass_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint.

  // Return true if the calling code is deoptimized.

  return caller_is_deopted(current);
}

int Runtime1::move_mirror_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  DEBUG_ONLY(NoHandleMark nhm;)
  {
    // Enter VM mode.
    ResetNoHandleMark rnhm;
    patch_code(current, StubId::c1_load_mirror_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint.

  // Return true if the calling code is deoptimized.

  return caller_is_deopted(current);
}

int Runtime1::move_appendix_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  DEBUG_ONLY(NoHandleMark nhm;)
  {
    // Enter VM mode.
    ResetNoHandleMark rnhm;
    patch_code(current, StubId::c1_load_appendix_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint.

  // Return true if the calling code is deoptimized.

  return caller_is_deopted(current);
}

// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::access_field_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  // Handles created in this function will be deleted by the
  // HandleMarkCleaner in the transition to the VM.
  NoHandleMark nhm;
  {
    // Enter VM mode.
    ResetNoHandleMark rnhm;
    patch_code(current, StubId::c1_access_field_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint.

  // Return true if the calling code is deoptimized.

  return caller_is_deopted(current);
}


JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // For now we just print out the block id.
  tty->print("%d ", block_id);
JRT_END


JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return int instead of bool; otherwise there may be a mismatch
  // between the C calling convention and the Java one. For example, on x86,
  // GCC may clear only %al when returning a bool false, while the JVM reads
  // the whole %eax as the return value, misinterpreting it as true.

  assert(mirror != nullptr, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
JRT_END

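// Called when a speculative predicate emitted by C1 (e.g. for range check
// elimination) fails at run time: invalidate the nmethod, record the trap
// in the method's MDO, and deoptimize the caller frame.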
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
  ResourceMark rm;

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != nullptr, "no more nmethod?");
  nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_profiling_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // Only metaspace OOM is expected. No Java code executed.
      assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

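  // Record the failed predicate in the MDO; the trap count lets later
  // compilation decisions take the failure into account.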
  if (mdo != nullptr) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(current);
    Method* inlinee = vfst.method();
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());

JRT_END

// Check the exception if the AbortVMOnException flag is set.
JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
  ResourceMark rm;
  const char* message = nullptr;
  if (ex->is_a(vmClasses::Throwable_klass())) {
    oop msg = java_lang_Throwable::message(ex);
    if (msg != nullptr) {
      message = java_lang_String::as_utf8_string(msg);
    }
  }
  Exceptions::debug_check_abort(ex->klass()->external_name(), message);
JRT_END

#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt: %u", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt: %u", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt: %u", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt: %u", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopystub_cnt: %u", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt: %u", _byte_arraycopy_stub_cnt);
  tty->print_cr(" _short_arraycopy_cnt: %u", _short_arraycopy_stub_cnt);
  tty->print_cr(" _int_arraycopy_cnt: %u", _int_arraycopy_stub_cnt);
  tty->print_cr(" _long_arraycopy_cnt: %u", _long_arraycopy_stub_cnt);
  tty->print_cr(" _oop_arraycopy_cnt: %u", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt: %u", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt: %u", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt: %u", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt: %u", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt: %u", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_null_free_array_slowcase_cnt: %u", _new_null_free_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt: %u", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt: %u", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _load_flat_array_slowcase_cnt: %u", _load_flat_array_slowcase_cnt);
  tty->print_cr(" _store_flat_array_slowcase_cnt: %u", _store_flat_array_slowcase_cnt);
  tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
  tty->print_cr(" _buffer_inline_args_slowcase_cnt: %u", _buffer_inline_args_slowcase_cnt);
  tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt: %u", _buffer_inline_args_no_receiver_slowcase_cnt);

  tty->print_cr(" _monitorenter_slowcase_cnt: %u", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt: %u", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt: %u", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count: %u", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count: %u", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count: %u", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count: %u", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count: %u", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count: %u", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_illegal_monitor_state_exception_count: %u", _throw_illegal_monitor_state_exception_count);
  tty->print_cr(" _throw_identity_exception_count: %u", _throw_identity_exception_count);
  tty->print_cr(" _throw_count: %u", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT