/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
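  // Compute the absolute address of the safepoint poll: pc() - offset() is the start
  // of the code section being emitted, and safepoint_offset() is the poll's offset
  // within it. Saving it in the thread's saved_exception_pc lets the shared
  // polling-page return handler locate the poll site in this nmethod.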
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
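  // Pass the method (as a Metadata*) and the bci of the overflowing counter to the
  // runtime, which may use them to drive profiling/recompilation decisions before
  // execution resumes at _continuation.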
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    DEBUG_ONLY(__ should_not_reach_here());
    return;
  }

  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
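  // Two stub flavors: the index-exception stub needs only the index, while the
  // range-check stub also receives the array (presumably so the exception message
  // can mention the array's length).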
  StubId stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = StubId::c1_throw_index_exception_id;
  } else {
    stub_id = StubId::c1_throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
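  // For an implicit check, record in the implicit exception table that a hardware
  // trap at code offset _offset (the division instruction) continues at this stub.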
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == StubId::c1_new_instance_id ||
         stub_id == StubId::c1_fast_new_instance_id ||
         stub_id == StubId::c1_fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
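  // Pick the runtime stub; the _nofpu variant can presumably skip saving FPU/XMM
  // state, since a method without FPU code has none live across this call.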
  StubId enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = StubId::c1_monitorenter_id;
  } else {
    enter_id = StubId::c1_monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // lock_reg was destroyed by fast unlocking attempt => recompute it
  ce->monitor_address(_monitor_ix, _lock_reg);

  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  StubId exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = StubId::c1_monitorexit_id;
  } else {
    exit_id = StubId::c1_monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

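// The patch-info record sits at this fixed negative offset from the return address
// of the call into the patching stub: on x86 both that call and a NativeGeneralJump
// are 5 bytes (see the assert against patch_info_pc in PatchingStub::emit_code).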
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = nullptr;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = nullptr;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
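    // Load the klass from the mirror in _obj and compare the current thread with the
    // klass's init_thread: any thread other than the initializer must take the
    // call_patch path.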
    Register tmp = rax;
    __ push_ppx(tmp);
    __ movptr(tmp, Address(_obj, java_lang_Class::klass_offset()));
    __ cmpptr(r15_thread, Address(tmp, InstanceKlass::init_thread_offset()));
    __ pop_ppx(tmp); // pop it right away, no matter which path we take
    __ jccb(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

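  // The fake movl: opcode 0xB8 (movl eax, imm32) and a zero filler byte, followed by
  // the three bytes the runtime actually needs: the distance back to the
  // being_initialized entry, the bytes to skip over this template, and the number of
  // bytes to copy back to the patch site.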
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
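  // Redirect the original patch site to this stub: plant an unconditional jump at
  // _pc_start so execution of the not-yet-patched site lands here.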
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);
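  // args[] now describes, per the Java calling convention, where each argument of
  // arraycopy(Object, int, Object, int, int) belongs: a register or a stack slot.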

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt), rscratch1);
  }
#endif

  __ jmp(_continuation);
}

#undef __