/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_riscv.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result->is_valid() || metadata_result->is_valid()) || oop_result != metadata_result,
         "registers must be different");
  assert(oop_result != xthread && metadata_result != xthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mv(c_rarg0, xthread);
  set_num_rt_args(0); // Nothing on stack

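  // Record sp, fp and the return address in the thread's frame anchor so
  // the runtime can walk the Java stack across this native call (for GC,
  // stack traces and exception dispatch).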
  Label retaddr;
  set_last_Java_frame(sp, fp, retaddr, t0);

  // do the call
  rt_call(entry);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push_reg(x10, sp);
  { Label L;
    get_thread(x10);
    beq(xthread, x10, L);
    stop("StubAssembler::call_RT: xthread not callee saved?");
    bind(L);
  }
  pop_reg(x10, sp);
#endif
  reset_last_Java_frame(true);

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
    beqz(t0, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result->is_valid()) {
      sd(zr, Address(xthread, JavaThread::vm_result_oop_offset()));
    }
    if (metadata_result->is_valid()) {
      sd(zr, Address(xthread, JavaThread::vm_result_metadata_offset()));
    }
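    // Forward the exception: if this stub declared no frame size, pop the
    // frame and jump to the shared forward_exception entry. The C1
    // forward_exception stub itself must never get here with a pending
    // exception, since it would reenter itself; any other stub dispatches
    // to it.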
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == (int)StubId::c1_forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result, xthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_metadata(metadata_result, xthread);
  }
  return call_offset;
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1) {
  mv(c_rarg1, arg1);
  return call_RT(oop_result, metadata_result, entry, 1);
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2) {
  const int arg_num = 2;
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
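      // The values are exactly crossed (arg1 must end up where arg2 lives
      // and vice versa), so swap them in place with the classic three-XOR
      // trick, which needs no scratch register.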
      xorr(arg1, arg1, arg2);
      xorr(arg2, arg1, arg2);
      xorr(arg1, arg1, arg2);
    } else {
      mv(c_rarg2, arg2);
      mv(c_rarg1, arg1);
    }
  } else {
    mv(c_rarg1, arg1);
    mv(c_rarg2, arg2);
  }
  return call_RT(oop_result, metadata_result, entry, arg_num);
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  const int arg_num = 3;
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
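    // A plain sequence of mv instructions could clobber a source register
    // before it has been read, so spill all three arguments to the stack
    // and reload them into the C argument registers.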
    const int arg1_sp_offset = 0;
    const int arg2_sp_offset = 1;
    const int arg3_sp_offset = 2;
    subi(sp, sp, (arg_num + 1) * wordSize);
    sd(arg1, Address(sp, arg1_sp_offset * wordSize));
    sd(arg2, Address(sp, arg2_sp_offset * wordSize));
    sd(arg3, Address(sp, arg3_sp_offset * wordSize));

    ld(c_rarg1, Address(sp, arg1_sp_offset * wordSize));
    ld(c_rarg2, Address(sp, arg2_sp_offset * wordSize));
    ld(c_rarg3, Address(sp, arg3_sp_offset * wordSize));
    addi(sp, sp, (arg_num + 1) * wordSize);
  } else {
    mv(c_rarg1, arg1);
    mv(c_rarg2, arg2);
    mv(c_rarg3, arg3);
  }
  return call_RT(oop_result, metadata_result, entry, arg_num);
}

enum return_state_t {
  does_not_return, requires_return, requires_pop_epilogue_return
};
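// does_not_return: the stub never returns; ~StubFrame plants a trap instead
//   of an epilogue.
// requires_return: the normal case; ~StubFrame emits a standard epilogue.
// requires_pop_epilogue_return: the frame may have been frozen (continuation
//   freezing), so the epilogue restores fp/ra with explicit loads rather
//   than a leave instruction; see StubAssembler::epilogue below.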

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;
  return_state_t _return_state;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue(bool use_pop) {
  // Avoid using a leave instruction when this frame may
  // have been frozen, since the current value of fp
  // restored from the stub would be invalid. We still
  // must restore the fp value saved on enter though.
  if (use_pop) {
    ld(fp, Address(sp));
    ld(ra, Address(sp, wordSize));
    addi(sp, sp, 2 * wordSize);
  } else {
    leave();
  }
  ret();
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state) {
  _sasm = sasm;
  _return_state = return_state;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}


StubFrame::~StubFrame() {
  if (_return_state == does_not_return) {
    __ should_not_reach_here();
  } else {
    __ epilogue(_return_state == requires_pop_epilogue_return);
  }
  _sasm = nullptr;
}

#undef __


// Implementation of Runtime1

#define __ sasm->

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 30 /* integer excluding x3, x4 */
};
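// The 62-word save area, from the stack pointer upwards:
//   f0 .. f31   32 words  (stored explicitly by save_live_registers)
//   padding      1 word   (16-byte alignment slot added by push_reg)
//   x5 .. x31   27 words  (x0/zr, ra, sp, gp and tp are not saved here)
//   fp, ra       2 words  (pushed by enter())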

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FPU registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them. The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_map != nullptr);

  // caller save registers only, see FrameMap::initialize
  // in c1_FrameMap_riscv.cpp for detail.
  const static Register caller_save_cpu_regs[FrameMap::max_nof_caller_save_cpu_regs] = {
    x7, x10, x11, x12, x13, x14, x15, x16, x17, x28, x29, x30, x31
  };

  for (int i = 0; i < FrameMap::max_nof_caller_save_cpu_regs; i++) {
    Register r = caller_save_cpu_regs[i];
    int sp_offset = cpu_reg_save_offsets[r->encoding()];
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  int sp_offset = cpu_reg_save_offsets[xthread->encoding()];
  oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                            xthread->as_VMReg());

  // fpu_regs
  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  // if the number of pushed regs is odd, one slot will be reserved for alignment
  __ push_reg(RegSet::range(x5, x31), sp); // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)

  if (save_fpu_registers) {
    // float registers
    __ subi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fsd(as_FloatRegister(i), Address(sp, i * wordSize));
    }
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // so we must still subtract 32 * wordSize from sp when save_fpu_registers == false
    __ subi(sp, sp, 32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
    }
    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // so we must still add 32 * wordSize to sp when save_fpu_registers == false
    __ addi(sp, sp, 32 * wordSize);
  }

  // if the number of popped regs is odd, the reserved slot for alignment will be removed
  __ pop_reg(RegSet::range(x5, x31), sp); // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)
}

static void restore_live_registers_except_r10(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
    }
    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // so we must still add 32 * wordSize to sp when save_fpu_registers == false
    __ addi(sp, sp, 32 * wordSize);
  }

  // pop integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4) & x10
  // there is one reserved slot for alignment on the stack in save_live_registers().
  __ pop_reg(RegSet::range(x5, x9), sp);   // pop x5 ~ x9 with the reserved slot for alignment
  __ pop_reg(RegSet::range(x11, x31), sp); // pop x11 ~ x31; x10 will be automatically skipped here
}

void Runtime1::initialize_pd() {
  int i = 0;
  int sp_offset = 0;
  const int step = 2; // SP offsets are in halfwords
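  // OopMap stack offsets are expressed in 32-bit VMReg slots, so each 64-bit
  // register occupies two slots; hence the step of 2.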

  // all float registers are saved explicitly
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += step;
  }

  // a slot reserved for stack 16-byte alignment, see MacroAssembler::push_reg
  sp_offset += step;
  // we save x5 ~ x31, except x0 ~ x4: loop starts from x5
  for (i = 5; i < FrameMap::nof_cpu_regs; i++) {
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += step;
  }
}

// return: offset in 64-bit words.
uint Runtime1::runtime_blob_current_thread_offset(frame f) {
  CodeBlob* cb = f.cb();
  assert(cb == Runtime1::blob_for(StubId::c1_monitorenter_id) ||
         cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id), "must be");
  assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
  int offset = cpu_reg_save_offsets[xthread->encoding()];
  return offset / 2; // SP offsets are in halfwords
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs arguments (passed in t0 and t1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  assert_cond(oop_map != nullptr);
  int call_offset = 0;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    __ mv(c_rarg1, t0);
    __ mv(c_rarg2, t1);
    call_offset = __ call_RT(noreg, noreg, target);
  }
  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, oop_map);

  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = x10;
  const Register exception_pc  = x13;

  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  OopMap* oop_map = nullptr;

  switch (id) {
  case StubId::c1_forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /* thread */);

    // load and clear pending exception oop into x10
    __ ld(exception_oop, Address(xthread, Thread::pending_exception_offset()));
    __ sd(zr, Address(xthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into x13
    __ ld(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ sd(zr, Address(xthread, JavaThread::vm_result_oop_offset()));
    __ sd(zr, Address(xthread, JavaThread::vm_result_metadata_offset()));
    break;
  case StubId::c1_handle_exception_nofpu_id:
  case StubId::c1_handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id);
    break;
  case StubId::c1_handle_exception_from_callee_id: {
    // At this point all registers except exception oop (x10) and
    // exception pc (ra) are dead.
    const int frame_size = 2 /* fp, return address */;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default: ShouldNotReachHere();
  }

  // verify that only x10 and x13 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that x10 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ sd(exception_oop, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(exception_pc, Address(xthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ sd(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  guarantee(oop_map != nullptr, "null oop_map!");
  oop_maps->add_gc_map(call_offset, oop_map);

  // x10: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only x10 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord));

  switch (id) {
  case StubId::c1_forward_exception_id:
  case StubId::c1_handle_exception_nofpu_id:
  case StubId::c1_handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id);
    break;
  case StubId::c1_handle_exception_from_callee_id:
    break;
  default: ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = x10;
  // other registers used in this stub
  const Register handler_addr  = x11;

  if (AbortVMOnException) {
    __ enter();
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), x10);
    restore_live_registers(sasm);
    __ leave();
  }

  // verify that only x10 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it. We also
  // save exception_oop
  __ subi(sp, sp, 2 * wordSize);
  __ sd(exception_oop, Address(sp, wordSize));
  __ sd(ra, Address(sp));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, ra);
  // x10: exception handler address of the caller

  // Only x10 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mv(handler_addr, x10);

  // get throwing pc (= return address).
  // ra has been destroyed by the call
  __ ld(ra, Address(sp));
  __ ld(exception_oop, Address(sp, wordSize));
  __ addi(sp, sp, 2 * wordSize);
  __ mv(x13, ra);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // x10: exception oop
  // x13: throwing pc
  // x11: exception handler
  __ jr(handler_addr);
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);
  assert_cond(oop_map != nullptr);

  __ mv(c_rarg0, xthread);
  Label retaddr;
  __ set_last_Java_frame(sp, fp, retaddr, t0);
  // do the call
  __ rt_call(target);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(t0);
    __ beq(xthread, t0, L);
    __ stop("StubAssembler::call_RT: xthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Runtime will return true if the nmethod has been deoptimized, this is the
  // expected scenario and anything else is an error. Note that we maintain a
  // check on the result purely as a defensive measure.
  Label no_deopt;
  __ beqz(x10, no_deopt); // Have we deoptimized?

  // Perform a re-execute. The proper return address is already on the stack,
  // we just need to restore registers, pop all of our frames but the return
  // address and jump to the deopt blob.

  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(no_deopt);
  __ stop("deopt not performed");

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) {
  // for better readability
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = nullptr;
  switch (id) {
    {
    case StubId::c1_forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret();
      }
      break;

    case StubId::c1_throw_div0_exception_id:
      {
        StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case StubId::c1_throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case StubId::c1_new_instance_id:
    case StubId::c1_fast_new_instance_id:
    case StubId::c1_fast_new_instance_init_check_id:
      {
        Register klass = x13; // Incoming
        Register obj   = x10; // Result

        if (id == StubId::c1_new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == StubId::c1_fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret();

        // x10: new instance
      }

      break;

    case StubId::c1_counter_overflow_id:
      {
        Register bci = x10;
        Register method = x11;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);

        const int bci_off = 0;
        const int method_off = 1;
        // Retrieve bci
        __ lw(bci, Address(fp, bci_off * BytesPerWord));
        // And a pointer to the Method*
        __ ld(method, Address(fp, method_off * BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret();
      }
      break;

    case StubId::c1_new_type_array_id:
    case StubId::c1_new_object_array_id:
      {
        Register length = x9;  // Incoming
        Register klass  = x13; // Incoming
        Register obj    = x10; // Result

        if (id == StubId::c1_new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register tmp = obj;
          __ lwu(tmp, Address(klass, Klass::layout_helper_offset()));
          __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift);
          int tag = ((id == StubId::c1_new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
          __ mv(t0, tag);
          __ beq(t0, tmp, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        int call_offset = 0;
        if (id == StubId::c1_new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret();

        // x10: new array
      }
      break;

    case StubId::c1_new_multi_array_id:
      {
        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // x10: klass
        // x9: rank
        // x12: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        __ mv(c_rarg1, x10);
        __ mv(c_rarg3, x12);
        __ mv(c_rarg2, x9);
        int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);

        // x10: new multi array
        __ verify_oop(x10);
      }
      break;

    case StubId::c1_register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations
        __ verify_oop(c_rarg0);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = x15;
        __ load_klass(t, x10);
        __ lbu(t, Address(t, Klass::misc_flags_offset()));
        __ test_bit(t0, t, exact_log2(KlassFlags::_misc_has_finalizer));
        __ bnez(t0, register_finalizer);
        __ ret();

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        assert_cond(oop_map != nullptr);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret();
      }
      break;

    case StubId::c1_throw_class_cast_exception_id:
      {
        StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case StubId::c1_throw_incompatible_class_change_error_id:
      {
        StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm,
                                            CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case StubId::c1_slow_subtype_check_id:
      {
        // Typical calling sequence:
        // push klass_RInfo (object klass or other subclass)
        // push sup_k_RInfo (array element klass or other superclass)
        // jump to slow_subtype_check
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          x10_off, x10_off_hi,
          x12_off, x12_off_hi,
          x14_off, x14_off_hi,
          x15_off, x15_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };
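        // Each 64-bit register occupies two 32-bit VMReg stack slots, hence
        // the *_off_hi entries. The result is written back over the saved
        // super klass slot (result_off = sup_k_off).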

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push_reg(RegSet::of(x10, x12, x14, x15), sp);

        __ ld(x14, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // sub klass
        __ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass

        Label miss;
        __ check_klass_subtype_slow_path(x14,     /*sub_klass*/
                                         x10,     /*super_klass*/
                                         x12,     /*tmp1_reg*/
                                         x15,     /*tmp2_reg*/
                                         nullptr, /*L_success*/
                                         &miss    /*L_failure*/);
        // Need extras for table lookup: x7, x11, x13

        // fallthrough on success:
        __ mv(t0, 1);
        __ sd(t0, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
        __ ret();

        __ bind(miss);
        __ sd(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
        __ ret();
      }
      break;

    case StubId::c1_monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case StubId::c1_monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments, requires_pop_epilogue_return);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        assert_cond(map != nullptr);

        // Called with store_parameter and not C ABI
        f.load_argument(1, x10); // x10: object
        f.load_argument(0, x11); // x11: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case StubId::c1_is_instance_of_id:
      {
        // Mirror: x10
        // Object: x11
        // Temps: x13, x14, x15, x16, x17
        // Result: x10

        // Get the Klass* into x16
        Register klass = x16, obj = x11, result = x10;
        __ ld(klass, Address(x10, java_lang_Class::klass_offset()));

        Label fail, is_secondary, success;

        __ beqz(klass, fail); // Klass is null
        __ beqz(obj, fail);   // obj is null

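        // Fast path: each Klass caches a super_check_offset. If it points at
        // a slot in the primary supers display, a single load and compare
        // decides the subtype test; if it equals secondary_super_cache_offset,
        // the superclass can only be found via the secondary supers table.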
        __ lwu(x13, Address(klass, in_bytes(Klass::super_check_offset_offset())));
        __ mv(x17, in_bytes(Klass::secondary_super_cache_offset()));
        __ beq(x13, x17, is_secondary); // Klass is a secondary superclass

        // Klass is a concrete class
        __ load_klass(x15, obj);
        __ add(x17, x15, x13);
        __ ld(x17, Address(x17));
        __ beq(klass, x17, success);
        __ mv(result, 0);
        __ ret();

        __ bind(is_secondary);
        __ load_klass(obj, obj);

        // This is necessary because a klass never appears in its own secondary_supers list.
        __ beq(obj, klass, success);

        __ lookup_secondary_supers_table_var(obj, klass, result, x13, x14, x15, x17, &success);

        __ bind(fail);
        __ mv(result, 0);
        __ ret();

        __ bind(success);
        __ mv(result, 1);
        __ ret();
      }
      break;

    case StubId::c1_monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case StubId::c1_monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        assert_cond(map != nullptr);

        // Called with store_parameter and not C ABI
        f.load_argument(0, x10); // x10: lock address

        // note: really a leaf routine but must set up the last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing the last Java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case StubId::c1_deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
        OopMap* oop_map = save_live_registers(sasm);
        assert_cond(oop_map != nullptr);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case StubId::c1_throw_range_check_failed_id:
      {
        StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case StubId::c1_unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case StubId::c1_access_field_patching_id:
      {
        StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case StubId::c1_load_klass_patching_id:
      {
        StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case StubId::c1_load_mirror_patching_id:
      {
        StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case StubId::c1_load_appendix_patching_id:
      {
        StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case StubId::c1_handle_exception_nofpu_id:
    case StubId::c1_handle_exception_id:
      {
        StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case StubId::c1_handle_exception_from_callee_id:
      {
        StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case StubId::c1_throw_index_exception_id:
      {
        StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case StubId::c1_throw_array_store_exception_id:
      {
        StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case StubId::c1_predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);

        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case StubId::c1_dtrace_object_alloc_id:
      { // c_rarg0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        save_live_registers(sasm);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);

        restore_live_registers(sasm);
      }
      break;

    default:
      {
        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
        __ mv(x10, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), x10);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  Unimplemented();
  return nullptr;
}