/*
 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_riscv.inline.hpp"

#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = x10;   // synchronization header
const Register SHIFT_count = x10;   // where the count for shift operations must be

#define __ _masm->

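// If one of the temp registers aliases the register that must be preserved,
// swap it for the spare 'extra' register so that, on return, all three temps
// are distinct from 'preserve'.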
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}

bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(t1, method->holder()->constant_encoding());
  __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}

LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
    case T_INT:
      result = (data->as_jint());
      break;
    case T_LONG:
      result = (data->as_jlong());
      break;
    default:
      ShouldNotReachHere();
      result = 0; // unreachable
  }
  return result;
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    __ movptr(tmp, (address)addr->disp());
    return Address(tmp, 0);
  }

  Register base = addr->base()->as_pointer_register();
  LIR_Opr index_opr = addr->index();

  if (index_opr->is_illegal()) {
    return Address(base, addr->disp());
  }

  int scale = addr->scale();
  if (index_opr->is_cpu_register()) {
    Register index;
    if (index_opr->is_single_cpu()) {
      index = index_opr->as_register();
    } else {
      index = index_opr->as_register_lo();
    }
    if (scale != 0) {
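      // tmp = base + (index << scale); shadd folds this into a single Zba
      // sh1add/sh2add/sh3add when that extension is available.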
      __ shadd(tmp, index, base, tmp, scale);
    } else {
      __ add(tmp, base, index);
    }
    return Address(tmp, addr->disp());
  } else if (index_opr->is_constant()) {
    intptr_t addr_offset = (((intptr_t)index_opr->as_constant_ptr()->as_jint()) << scale) + addr->disp();
    return Address(base, addr_offset);
  }

  Unimplemented();
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, t0);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}

// Ensure a valid Address (base + offset) to a stack-slot. If stack access is
// not encodable as a base + (immediate) offset, generate an explicit address
// calculation to hold the address in t0.
Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");

  return addr;
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  guarantee(osr_entry != nullptr, "null osr_entry!");
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // x12: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame; the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a synchronized method)

  // Initialize monitors in the compiled activation.
  // x12: pointer to osr buffer
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
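    // For example, with max_locals == 2 and number_of_locks == 2 this gives
    // monitor_offset == 2*8 + 16*1 == 32, so monitor 0's (lock, oop) pair sits
    // at byte offsets 32/40 of the OSR buffer and monitor 1's at 16/24.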
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
        __ bnez(t0, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif // ASSERT
      __ ld(x9, Address(OSR_buf, slot_offset + 0));
      __ sd(x9, frame_map()->address_for_monitor_lock(i));
      __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
      __ sd(x9, frame_map()->address_for_monitor_object(i));
    }
  }
}

// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == nullptr) {
    __ mv(reg, zr);
  } else {
    __ movoop(reg, o);
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}

// This specifies the sp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}

int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in x10 and x13, respectively
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(x10);

  // search an exception handler (x10: exception oop, x13: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif // PRODUCT

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(x10);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mv(x9, x10);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r10_opr);
    stub = new MonitorExitStub(FrameMap::r10_opr, 0);
    __ unlock_object(x15, x14, x10, x16, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mv(c_rarg0, xthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mv(x10, x9);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

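  // The handler entry point is the jump below, which loops back to the call
  // above; this guarantees that enough bytes follow the entry for the
  // post-call NOP check (see the assert below).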
  int entry_offset = __ offset();
  __ j(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == x10, "word returns are in x10");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(t0, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(t0, 0, relocInfo::poll_type);
  return __ offset();
}

void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  __ mv(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  address const_addr = nullptr;
  jfloat fconst;
  jdouble dconst;

  switch (c->type()) {
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register(), c->as_jint());
      break;

    case T_ADDRESS:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;

    case T_OBJECT:
    case T_ARRAY:
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;

    case T_FLOAT:
      fconst = c->as_jfloat();
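      // With the Zfa extension, fli.s can materialize a small set of FP
      // immediates directly, avoiding a load from the constant table.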
      if (MacroAssembler::can_fp_imm_load(fconst)) {
        __ fli_s(dest->as_float_reg(), fconst);
      } else {
        const_addr = float_constant(fconst);
        assert(const_addr != nullptr, "must create float constant in the constant table");
        __ flw(dest->as_float_reg(), InternalAddress(const_addr));
      }
      break;

    case T_DOUBLE:
      dconst = c->as_jdouble();
      if (MacroAssembler::can_dp_imm_load(dconst)) {
        __ fli_d(dest->as_double_reg(), dconst);
      } else {
        const_addr = double_constant(c->as_jdouble());
        assert(const_addr != nullptr, "must create double constant in the constant table");
        __ fld(dest->as_double_reg(), InternalAddress(const_addr));
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_OBJECT:
      if (c->as_jobject() == nullptr) {
        __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
        reg2stack(FrameMap::t1_opr, dest, c->type());
      }
      break;
    case T_ADDRESS:
      const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
      reg2stack(FrameMap::t1_opr, dest, c->type());
      break; // must not fall into the T_INT store below
    case T_INT: // fall through
    case T_FLOAT:
      if (c->as_jint_bits() == 0) {
        __ sw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        __ mv(t1, c->as_jint_bits());
        __ sw(t1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
      break;
    case T_LONG: // fall through
    case T_DOUBLE:
      if (c->as_jlong_bits() == 0) {
        __ sd(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                lo_word_offset_in_bytes));
      } else {
        __ mv(t1, (intptr_t)c->as_jlong_bits());
        __ sd(t1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                lo_word_offset_in_bytes));
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();
  void (MacroAssembler::* insn)(Register Rt, const Address &adr, Register temp);
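  // Every constant that reaches here is zero (asserted per case below), so we
  // only need to select a store of the right width and store zr through it.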
  switch (type) {
    case T_ADDRESS:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_LONG:
      assert(c->as_jlong() == 0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_DOUBLE:
      assert(c->as_jdouble() == 0.0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_INT:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sw; break;
    case T_FLOAT:
      assert(c->as_jfloat() == 0.0f, "should be");
      insn = &MacroAssembler::sw; break;
    case T_OBJECT: // fall through
    case T_ARRAY:
      assert(c->as_jobject() == nullptr, "should be");
      if (UseCompressedOops && !wide) {
        insn = &MacroAssembler::sw;
      } else {
        insn = &MacroAssembler::sd;
      }
      break;
    case T_CHAR: // fall through
    case T_SHORT:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sh;
      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sb; break;
    default:
      ShouldNotReachHere();
      insn = &MacroAssembler::sd; // unreachable
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }
  (_masm->*insn)(zr, as_Address(to_addr), t0);
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());
  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
  } else if (dest->is_single_fpu()) {
    assert(src->is_single_fpu(), "expect single fpu");
    __ fmv_s(dest->as_float_reg(), src->as_float_reg());
  } else if (dest->is_double_fpu()) {
    assert(src->is_double_fpu(), "expect double fpu");
    __ fmv_d(dest->as_double_reg(), src->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ sd(src->as_register(), stack_slot_address(index, c_sz64));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ sd(src->as_register(), stack_slot_address(index, c_sz64));
    } else {
      __ sw(src->as_register(), stack_slot_address(index, c_sz32));
    }
  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
    __ sd(src->as_register_lo(), dest_addr_LO);
  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32));
  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64));
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src
  Register compressed_src = t1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();

  switch (type) {
    case T_FLOAT:
      __ fsw(src->as_float_reg(), as_Address(to_addr));
      break;

    case T_DOUBLE:
      __ fsd(src->as_double_reg(), as_Address(to_addr));
      break;

    case T_ARRAY: // fall through
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ sw(compressed_src, as_Address(to_addr));
      } else {
        __ sd(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ sd(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ sd(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ sw(src->as_register(), as_Address(to_addr));
      break;
    case T_LONG:
      __ sd(src->as_register_lo(), as_Address(to_addr));
      break;
    case T_BYTE: // fall through
    case T_BOOLEAN:
      __ sb(src->as_register(), as_Address(to_addr));
      break;
    case T_CHAR: // fall through
    case T_SHORT:
      __ sh(src->as_register(), as_Address(to_addr));
      break;
    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (type == T_INT) {
      __ lw(dest->as_register(), stack_slot_address(index, c_sz32));
    } else if (is_reference_type(type)) {
      __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
    } else {
      __ lwu(dest->as_register(), stack_slot_address(index, c_sz32));
    }
  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
    __ ld(dest->as_register_lo(), src_addr_LO);
  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ flw(dest->as_float_reg(), stack_slot_address(index, c_sz32));
  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ fld(dest->as_double_reg(), stack_slot_address(index, c_sz64));
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  deoptimize_trap(info);
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE) {
    temp = FrameMap::t1_long_opr;
  } else {
    temp = FrameMap::t1_opr;
  }

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type());
}

void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT:
      __ flw(dest->as_float_reg(), as_Address(from_addr));
      break;
    case T_DOUBLE:
      __ fld(dest->as_double_reg(), as_Address(from_addr));
      break;
    case T_ARRAY: // fall through
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ lwu(dest->as_register(), as_Address(from_addr));
      } else {
        __ ld(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We get here to load a method pointer from the stack in a dtrace
      // runtime call. This can't work on 64 bit with compressed klass ptrs:
      // T_METADATA can be a compressed klass ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ld(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      __ ld(dest->as_register(), as_Address(from_addr));
      break;
    case T_INT:
      __ lw(dest->as_register(), as_Address(from_addr));
      break;
    case T_LONG:
      __ ld(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    case T_BYTE:
      __ lb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN:
      __ lbu(dest->as_register(), as_Address(from_addr));
      break;
    case T_CHAR:
      __ lhu(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ lh(dest->as_register(), as_Address(from_addr));
      break;
    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv: // fall through
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmadd_d(op->result_opr()->as_double_reg(),
                 op->in_opr1()->as_double_reg(),
                 op->in_opr2()->as_double_reg(),
                 op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadd_s(op->result_opr()->as_float_reg(),
                 op->in_opr1()->as_float_reg(),
                 op->in_opr2()->as_float_reg(),
                 op->in_opr3()->as_float_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}

// Consider using cmov (Zicond)
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
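  // Branch-based select: jump to 'label' when the condition holds, otherwise
  // fall through, so the result is opr1 if the condition is true and opr2
  // otherwise.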
  Label label;

  emit_branch(condition, cmp_opr1, cmp_opr2, label, /* is_far */ false,
              /* is_unordered */ !(condition == lir_cond_greaterEqual || condition == lir_cond_greater));

  Label done;
  move_op(opr2, result, type, lir_patch_none, nullptr,
          false);   // wide
  __ j(done);
  __ bind(label);
  move_op(opr1, result, type, lir_patch_none, nullptr,
          false);   // wide
  __ bind(done);
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
  LIR_Condition condition = op->cond();
  if (condition == lir_cond_always) {
    if (op->info() != nullptr) {
      add_debug_info_for_branch(op->info());
    }
  } else {
    assert(op->in_opr1() != LIR_OprFact::illegalOpr && op->in_opr2() != LIR_OprFact::illegalOpr, "conditional branches must have legal operands");
  }
  bool is_unordered = (op->ublock() == op->block());
  emit_branch(condition, op->in_opr1(), op->in_opr2(), *op->label(), /* is_far */ true, is_unordered);
}

void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label,
                                bool is_far, bool is_unordered) {

  if (cmp_flag == lir_cond_always) {
    __ j(label);
    return;
  }

  if (cmp1->is_cpu_register()) {
    Register reg1 = as_reg(cmp1);
    if (cmp2->is_cpu_register()) {
      Register reg2 = as_reg(cmp2);
      __ c1_cmp_branch(cmp_flag, reg1, reg2, label, cmp1->type(), is_far);
    } else if (cmp2->is_constant()) {
      const2reg_helper(cmp2);
      __ c1_cmp_branch(cmp_flag, reg1, t0, label, cmp2->type(), is_far);
    } else {
      ShouldNotReachHere();
    }
  } else if (cmp1->is_single_fpu()) {
    assert(cmp2->is_single_fpu(), "expect single float register");
    __ c1_float_cmp_branch(cmp_flag, cmp1->as_float_reg(), cmp2->as_float_reg(), label, is_far, is_unordered);
  } else if (cmp1->is_double_fpu()) {
    assert(cmp2->is_double_fpu(), "expect double float register");
    __ c1_float_cmp_branch(cmp_flag | C1_MacroAssembler::c1_double_branch_mask,
                           cmp1->as_double_reg(), cmp2->as_double_reg(), label, is_far, is_unordered);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      __ fcvt_s_w(dest->as_float_reg(), src->as_register()); break;
    case Bytecodes::_i2d:
      __ fcvt_d_w(dest->as_double_reg(), src->as_register()); break;
    case Bytecodes::_l2d:
      __ fcvt_d_l(dest->as_double_reg(), src->as_register_lo()); break;
    case Bytecodes::_l2f:
      __ fcvt_s_l(dest->as_float_reg(), src->as_register_lo()); break;
    case Bytecodes::_f2d:
      __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); break;
    case Bytecodes::_d2f:
      __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); break;
    case Bytecodes::_i2c:
      __ zext(dest->as_register(), src->as_register(), 16); break;
    case Bytecodes::_i2l:
      __ sext(dest->as_register_lo(), src->as_register(), 32); break;
    case Bytecodes::_i2s:
      __ sext(dest->as_register(), src->as_register(), 16); break;
    case Bytecodes::_i2b:
      __ sext(dest->as_register(), src->as_register(), 8); break;
    case Bytecodes::_l2i:
      __ sext(dest->as_register(), src->as_register_lo(), 32); break;
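    // The *_safe variants add a NaN check so these conversions produce the 0
    // Java requires for NaN inputs, rather than the RISC-V default result.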
    case Bytecodes::_d2l:
      __ fcvt_l_d_safe(dest->as_register_lo(), src->as_double_reg()); break;
    case Bytecodes::_f2i:
      __ fcvt_w_s_safe(dest->as_register(), src->as_float_reg()); break;
    case Bytecodes::_f2l:
      __ fcvt_l_s_safe(dest->as_register_lo(), src->as_float_reg()); break;
    case Bytecodes::_d2i:
      __ fcvt_w_d_safe(dest->as_register(), src->as_double_reg()); break;
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ lbu(t0, Address(op->klass()->as_register(),
                       InstanceKlass::init_state_offset()));
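    // Acquire ordering: keep the accesses below from floating above the
    // init_state load; pairs with the release when the class is marked
    // fully initialized.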
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ mv(t1, (u1)InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ bne(t0, t1, *op->stub()->entry(), /* is_far */ true);
  }

  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ j(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mv(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
                                        ciProfileData *data, Register recv) {
  int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
  __ profile_receiver_type(recv, mdo, mdp_offset);
}

void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
  ciMethod* method = op->profiled_method();
  assert(method != nullptr, "Should have method");
  int bci = op->profiled_bci();
  *md = method->method_data_or_null();
  guarantee(*md != nullptr, "Sanity");
  *data = ((*md)->bci_to_data(bci));
  assert(*data != nullptr, "need data for type check");
  assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}

void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1,
                                               Register k_RInfo, Register klass_RInfo,
                                               Label *failure_target, Label *success_target) {
  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(klass_RInfo, obj);
  if (k->is_loaded()) {
    // See if we get an immediate positive hit
    __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset())));
    if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
      __ bne(k_RInfo, t0, *failure_target, /* is_far */ true);
      // successful cast, fall through to profile or jump
    } else {
      // See if we get an immediate positive hit
      __ beq(k_RInfo, t0, *success_target);
      // check for self
      __ beq(klass_RInfo, k_RInfo, *success_target);

      __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
      __ sd(k_RInfo, Address(sp, 0));            // super klass
      __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
      __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      // load result to k_RInfo
      __ ld(k_RInfo, Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
      // result is a boolean
      __ beqz(k_RInfo, *failure_target, /* is_far */ true);
      // successful cast, fall through to profile or jump
    }
  } else {
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...)
    __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
    __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
    __ sd(k_RInfo, Address(sp, 0));            // super klass
    __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    // load result to k_RInfo
    __ ld(k_RInfo, Address(sp, 0));
    __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
    // result is a boolean
    __ beqz(k_RInfo, *failure_target, /* is_far */ true);
    // successful cast, fall through to profile or jump
  }
}

void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Register obj,
                                   Register k_RInfo, Register klass_RInfo, Label* obj_is_null) {
  Register mdo = klass_RInfo;
  __ mov_metadata(mdo, md->constant_encoding());
  Label not_null;
  __ bnez(obj, not_null);
  // Object is null, update MDO and exit
  Address data_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
  __ lbu(t0, data_addr);
  __ ori(t0, t0, BitData::null_seen_byte_constant());
  __ sb(t0, data_addr);
  __ j(*obj_is_null);
  __ bind(not_null);

  Register recv = k_RInfo;
  __ load_klass(recv, obj);
  type_profile_helper(mdo, md, data, recv);
}

void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) {
  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  const bool should_profile = op->should_profile();
  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  Rtmp1 = op->tmp3()->as_register();
  select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    profile_object(md, data, obj, k_RInfo, klass_RInfo, obj_is_null);
  } else {
    __ beqz(obj, *obj_is_null);
  }

  typecheck_loaded(op, k, k_RInfo);
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(t0, obj, t1);
    __ bne(t0, k_RInfo, *failure_target, /* is_far */ true);
    // successful cast, fall through to profile or jump
  } else {
    typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target);
  }

  __ j(*success);
}

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    typecheck_lir_store(op, should_profile);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mv(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mv(dst, zr);
    __ j(done);
    __ bind(success);
    __ mv(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
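    // LR/SC and AMO instructions take a plain register address, so the LIR
    // address must be a bare base with no displacement or index.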
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register tmp1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      Register tmp2 = op->tmp2()->as_register();
      assert(op->tmp2()->is_valid(), "must be");

      __ encode_heap_oop(tmp1, cmpval);
      cmpval = tmp1;
      __ encode_heap_oop(tmp2, newval);
      newval = tmp2;
      caswu(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }

  if (op->result_opr()->is_valid()) {
    assert(op->result_opr()->is_register(), "need a register");
    __ mv(as_reg(op->result_opr()), t0); // the CAS result is in t0: 0 means success
  }
}

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:  __ fabs_d(dest->as_double_reg(), value->as_double_reg()); break;
    case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
    default:       ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  if (dst->is_single_cpu()) {
    Register Rdst = dst->as_register();
    if (right->is_constant()) {
      int right_const = right->as_jint();
      if (Assembler::is_simm12(right_const)) {
        logic_op_imm(Rdst, Rleft, right_const, code);
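        // RV64 keeps 32-bit ints sign-extended in 64-bit registers, and the
        // immediate form operates on all 64 bits, so re-extend the result.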
        __ sext(Rdst, Rdst, 32);
      } else {
        __ mv(t0, right_const);
        logic_op_reg32(Rdst, Rleft, t0, code);
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
      logic_op_reg32(Rdst, Rleft, Rright, code);
    }
  } else {
    Register Rdst = dst->as_register_lo();
    if (right->is_constant()) {
      long right_const = right->as_jlong();
      if (Assembler::is_simm12(right_const)) {
        logic_op_imm(Rdst, Rleft, right_const, code);
      } else {
        __ mv(t0, right_const);
        logic_op_reg(Rdst, Rleft, t0, code);
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
      logic_op_reg(Rdst, Rleft, Rright, code);
    }
  }
}

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op) {
  ShouldNotCallThis();
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1,
                   left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1,
                   left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    __ cmp_l2i(dst->as_register(), left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::align_call(LIR_Code code) {
  // With RVC a call instruction may get 2-byte aligned.
  // The address of the call instruction needs to be 4-byte aligned to
  // ensure that it does not span a cache line so that it can be patched.
  __ align(NativeInstruction::instruction_size);
}

void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  Assembler::IncompressibleScope scope(_masm);
  address call = __ reloc_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("reloc call address stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  Assembler::IncompressibleScope scope(_masm);
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("reloc call address stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  MacroAssembler::assert_alignment(call_pc);
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == x10, "must match");
  assert(exceptionPC->as_register() == x13, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
    // As no instructions have been generated yet for this LIR node it's
    // possible that an oop map already exists for the current offset.
    // In that case insert a dummy NOP here to ensure all oop map PCs
    // are unique. See JDK-8237483.
    __ nop();
  }
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ la(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(x10);
  // search an exception handler (x10: exception oop, x13: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
  __ nop();
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == x10, "must match");
  __ j(_unwind_handler_entry);
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register count_reg = count->as_register();
  if (dest->is_single_cpu()) {
    assert(dest->type() == T_INT, "unexpected result type");
    assert(left->type() == T_INT, "unexpected left type");
    __ andi(t0, count_reg, 31); // should not shift more than 31 bits
    switch (code) {
      case lir_shl:  __ sllw(dest_reg, left_reg, t0); break;
      case lir_shr:  __ sraw(dest_reg, left_reg, t0); break;
      case lir_ushr: __ srlw(dest_reg, left_reg, t0); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    __ andi(t0, count_reg, 63); // should not shift more than 63 bits
    switch (code) {
      case lir_shl:  __ sll(dest_reg, left_reg, t0); break;
      case lir_shr:  __ sra(dest_reg, left_reg, t0); break;
      case lir_ushr: __ srl(dest_reg, left_reg, t0); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  if (dest->is_single_cpu()) {
    assert(dest->type() == T_INT, "unexpected result type");
    assert(left->type() == T_INT, "unexpected left type");
    count &= 0x1f;
    if (count != 0) {
      switch (code) {
        case lir_shl:  __ slliw(dest_reg, left_reg, count); break;
        case lir_shr:  __ sraiw(dest_reg, left_reg, count); break;
        case lir_ushr: __ srliw(dest_reg, left_reg, count); break;
        default: ShouldNotReachHere();
      }
    } else {
      move_regs(left_reg, dest_reg);
    }
  } else if (dest->is_double_cpu()) {
    count &= 0x3f;
    if (count != 0) {
      switch (code) {
        case lir_shl:  __ slli(dest_reg, left_reg, count); break;
        case lir_shr:  __ srai(dest_reg, left_reg, count); break;
        case lir_ushr: __ srli(dest_reg, left_reg, count); break;
        default: ShouldNotReachHere();
      }
    } else {
      move_regs(left->as_register_lo(), dest->as_register_lo());
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();   // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  Register temp = op->scratch_opr()->as_register();
  if (op->code() == lir_lock) {
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  __ load_klass(result, obj);
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  guarantee(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ increment(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      __ load_klass(recv, recv);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ increment(counter_addr, DataLayout::counter_increment);
  }
}

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  __ la(res, ExternalAddress(StubRoutines::crc_table_addr()));

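  // The running CRC32 is kept bit-inverted: invert on entry, update with one
  // byte via the table, then invert again for the result.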
  __ notr(crc, crc); // ~crc
  __ zext(crc, crc, 32);
  __ update_byte_crc32(crc, val, res);
  __ notr(res, crc); // ~crc
}

void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
                                   Register tmp, Label &next, Label &none,
                                   Address mdo_addr) {
  if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
    if (exact_klass != nullptr) {
      __ mov_metadata(tmp, exact_klass->constant_encoding());
    } else {
      __ load_klass(tmp, tmp);
    }

    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    // klass seen before, nothing to do. The unknown bit may have been
    // set already but no need to check.
    __ beqz(t0, next);

    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    if (TypeEntries::is_type_none(current_klass)) {
      __ beqz(t1, none);
      __ mv(t0, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, none);
      // There is a chance that the checks above
      // fail if another thread has just set the
      // profiling to this obj's klass
      __ membar(MacroAssembler::LoadLoad);
      __ xorr(tmp, tmp, t1); // get back original value before XOR
      __ ld(t1, mdo_addr);
      __ xorr(tmp, tmp, t1);
      __ andi(t0, tmp, TypeEntries::type_klass_mask);
      __ beqz(t0, next);
    }
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);
  }

1601 // different than before. Cannot keep accurate profile.
1602 __ ld(t1, mdo_addr);
1603 __ ori(t1, t1, TypeEntries::type_unknown);
1604 __ sd(t1, mdo_addr);
1605
1606 if (TypeEntries::is_type_none(current_klass)) {
1607 __ j(next);
1608
1609 __ bind(none);
1610 // first time here. Set profile type.
1611 __ sd(tmp, mdo_addr);
1612 #ifdef ASSERT
1613 __ andi(tmp, tmp, TypeEntries::type_mask);
1614 __ verify_klass_ptr(tmp);
1615 #endif
1616 }
1617 }
1618
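// Fast path for the case where only a single klass can ever be seen at this
// profile point: either install it on first use or, on a mismatch, degrade
// the entry straight to type_unknown.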
void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                                      Address mdo_addr, Label &next) {
  // There's a single possible klass at this profile point
  assert(exact_klass != nullptr, "should be");
  if (TypeEntries::is_type_none(current_klass)) {
    __ mov_metadata(tmp, exact_klass->constant_encoding());
    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    __ beqz(t0, next);
#ifdef ASSERT
    {
      Label ok;
      __ ld(t0, mdo_addr);
      __ beqz(t0, ok);
      __ mv(t1, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, ok);
      // may have been set by another thread
      __ membar(MacroAssembler::LoadLoad);
      __ mov_metadata(t0, exact_klass->constant_encoding());
      __ ld(t1, mdo_addr);
      __ xorr(t1, t0, t1);
      __ andi(t1, t1, TypeEntries::type_mask);
      __ beqz(t1, ok);

      __ stop("unexpected profiling mismatch");
      __ bind(ok);
    }
#endif
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
#ifdef ASSERT
    __ andi(tmp, tmp, TypeEntries::type_mask);
    __ verify_klass_ptr(tmp);
#endif
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    __ ori(tmp, tmp, TypeEntries::type_unknown);
    __ sd(tmp, mdo_addr);
  }
}

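// Record a null value: branch to 'update' for non-null values, otherwise set
// the null_seen flag in the profile entry (unless already recorded) and skip
// the type update by jumping to 'next'.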
void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_klass,
                               Address mdo_addr, bool do_update, Label &next) {
  __ bnez(tmp, update);
  if (!TypeEntries::was_null_seen(current_klass)) {
    __ ld(t1, mdo_addr);
    __ ori(t1, t1, TypeEntries::null_seen);
    __ sd(t1, mdo_addr);
  }
  if (do_update) {
    __ j(next);
  }
}

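// Profile the runtime type of the value in obj for the MethodData type entry
// at mdo_addr: record null via check_null() when allowed, then update the
// klass entry via check_conflict()/check_no_conflict() depending on whether
// more than one klass can reach this profile point.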
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert_different_registers(tmp, t0, t1, mdo_addr.base());

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mv(tmp, obj);
  }
  if (do_null) {
    check_null(tmp, update, current_klass, mdo_addr, do_update, next);
#ifdef ASSERT
  } else {
    __ bnez(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      check_exact_klass(tmp, exact_klass);
    }
#endif
    if (!no_conflict) {
      check_conflict(exact_klass, current_klass, tmp, next, none, mdo_addr);
    } else {
      check_no_conflict(exact_klass, current_klass, tmp, mdo_addr, next);
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

void LIR_Assembler::align_backward_branch_target() { }

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fneg_s(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fneg_d(dest->as_double_reg(), left->as_double_reg());
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  LIR_Address* adr = addr->as_address_ptr();
  Register dst = dest->as_pointer_register();

  assert_different_registers(dst, t0);
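  // If dst aliases the base register and the (folded) displacement does not
  // fit in a 12-bit immediate, materializing the offset into dst would
  // clobber the base while it is still needed; build the address in the t0
  // scratch register first and then move it into dst.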
  if (adr->base()->is_valid() && dst == adr->base()->as_pointer_register() && (!adr->index()->is_cpu_register())) {
    int scale = adr->scale();
    intptr_t offset = adr->disp();
    LIR_Opr index_op = adr->index();
    if (index_op->is_constant()) {
      offset += ((intptr_t)index_op->as_constant_ptr()->as_jint()) << scale;
    }

    if (!Assembler::is_simm12(offset)) {
      __ la(t0, as_Address(adr));
      __ mv(dst, t0);
      return;
    }
  }

  __ la(dst, as_Address(adr));
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  Assembler::IncompressibleScope scope(_masm);
  // Post-call nops must be naturally aligned due to cmodx rules.
  align_call(lir_rtcall);

  __ rt_call(dest);

  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info, /* wide */ false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  Label ok;
  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    LIR_Condition cond = op->condition();
    emit_branch(cond, op->in_opr1(), op->in_opr2(), ok, /* is_far */ false,
                /* is_unordered */ !(cond == lir_cond_greaterEqual || cond == lir_cond_greater));
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

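// The acquire/release barriers below use the standard conservative mappings:
// acquire = LoadLoad|LoadStore, release = LoadStore|StoreStore.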
void LIR_Assembler::membar_acquire() {
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(MacroAssembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ pause();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mv(result_reg->as_register(), xthread);
}

void LIR_Assembler::peephole(LIR_List *lir) {}

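// Atomic read-modify-write (get-and-add / get-and-swap). get_op() below
// selects the matching MacroAssembler AMO helper for the operand type; the
// trailing AnyAny barrier adds a full fence after the operation.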
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  get_op(type);

  switch (code) {
    case lir_xadd:
      {
        RegisterOrConstant inc;
        Register tmp = as_reg(tmp_op);
        Register dst = as_reg(dest);
        if (data->is_constant()) {
          inc = RegisterOrConstant(as_long(data));
          assert_different_registers(dst, addr.base(), tmp);
          assert_different_registers(tmp, t0);
        } else {
          inc = RegisterOrConstant(as_reg(data));
          assert_different_registers(inc.as_register(), dst, addr.base(), tmp);
        }
        __ la(tmp, addr);
        (_masm->*add)(dst, inc, tmp);
        break;
      }
    case lir_xchg:
      {
        Register tmp = tmp_op->as_register();
        Register obj = as_reg(data);
        Register dst = as_reg(dest);
        if (is_oop && UseCompressedOops) {
          __ encode_heap_oop(t0, obj);
          obj = t0;
        }
        assert_different_registers(obj, addr.base(), tmp);
        assert_different_registers(dst, addr.base(), tmp);
        __ la(tmp, addr);
        (_masm->*xchg)(dst, obj, tmp);
        if (is_oop && UseCompressedOops) {
          __ decode_heap_oop(dst);
        }
      }
      break;
    default:
      ShouldNotReachHere();
  }
  __ membar(MacroAssembler::AnyAny);
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Helper functions which check for constant-section overflow and set
// bailout if it occurs. They always return a valid embeddable pointer,
// but in the bailout case the pointer won't refer to unique storage.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

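// Compare-and-swap helpers. cmpxchg() with result_as_bool leaves 1 in t0 on
// success; seqz then inverts it, so t0 == 1 signals that the compare failed.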
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // if cmpxchg failed (values not equal), set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::caswu(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::uint32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // if cmpxchg failed (values not equal), set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int64, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // if cmpxchg failed (values not equal), set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = nullptr;

  switch (patching_id(info)) {
    case PatchingStub::access_field_id:
      target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
      break;
    case PatchingStub::load_klass_id:
      target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
      break;
    case PatchingStub::load_mirror_id:
      target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
      break;
    case PatchingStub::load_appendix_id:
      target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
      break;
    default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::check_exact_klass(Register tmp, ciKlass* exact_klass) {
  Label ok;
  __ load_klass(tmp, tmp);
  __ mov_metadata(t0, exact_klass->constant_encoding());
  __ beq(tmp, t0, ok);
  __ stop("exact klass and actual klass differ");
  __ bind(ok);
}

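// Select the AMO helpers used by atomic_op() above. With compressed oops the
// 32-bit unsigned exchange variant is chosen, since narrow oops are 32-bit
// zero-extended values.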
void LIR_Assembler::get_op(BasicType type) {
  switch (type) {
    case T_INT:
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
      break;
    case T_LONG:
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops) {
        xchg = &MacroAssembler::atomic_xchgalwu;
        add = &MacroAssembler::atomic_addalw;
      } else {
        xchg = &MacroAssembler::atomic_xchgal;
        add = &MacroAssembler::atomic_addal;
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

// emit_opTypeCheck sub-functions
void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile) {
  Register value = op->object()->as_register();
  Register array = op->array()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();

  CodeStub* stub = op->stub();

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label done;
  Label* success_target = &done;
  Label* failure_target = stub->entry();

  if (should_profile) {
    profile_object(md, data, value, k_RInfo, klass_RInfo, &done);
  } else {
    __ beqz(value, done);
  }

  add_debug_info_for_null_check_here(op->info_for_exception());
  __ load_klass(k_RInfo, array);
  __ load_klass(klass_RInfo, value);

  lir_store_slowcheck(k_RInfo, klass_RInfo, Rtmp1, success_target, failure_target);

  __ bind(done);
}

void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1,
                                        Label* success_target, Label* failure_target) {
  // get instance klass (it's already uncompressed)
  __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
  // perform the fast part of the checking logic
  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
  // call out-of-line instance of __ check_klass_subtype_slow_path(...)
  __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
  __ sd(k_RInfo, Address(sp, 0));            // super klass
  __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
  // load result to k_RInfo
  __ ld(k_RInfo, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
  // result is a boolean
  __ beqz(k_RInfo, *failure_target, /* is_far */ true);
}

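// Materialize a constant into the t0 scratch operand (t0_long_opr for
// T_LONG) for callers that need the constant in a register; float and double
// constants never take this path.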
void LIR_Assembler::const2reg_helper(LIR_Opr src) {
  switch (src->as_constant_ptr()->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA:
      const2reg(src, FrameMap::t0_opr, lir_patch_none, nullptr);
      break;
    case T_LONG:
      const2reg(src, FrameMap::t0_long_opr, lir_patch_none, nullptr);
      break;
    case T_FLOAT:
    case T_DOUBLE:
    default:
      ShouldNotReachHere();
  }
}

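// Logical ops. The *w variants operate on 32-bit values; the MacroAssembler
// helpers keep the result in the sign-extended form RV64 expects for ints.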
void LIR_Assembler::logic_op_reg32(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andrw(dst, left, right); break;
    case lir_logic_or:  __ orrw (dst, left, right); break;
    case lir_logic_xor: __ xorrw(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_reg(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andr(dst, left, right); break;
    case lir_logic_or:  __ orr (dst, left, right); break;
    case lir_logic_xor: __ xorr(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_imm(Register dst, Register left, int right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andi(dst, left, right); break;
    case lir_logic_or:  __ ori (dst, left, right); break;
    case lir_logic_xor: __ xori(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

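// Spill outgoing stub parameters into the reserved argument area at the top
// of the frame (sp-relative, word-sized slots).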
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ sd(r, Address(sp, offset_from_rsp_in_bytes));
}

void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mv(t0, c);
  __ sd(t0, Address(sp, offset_from_rsp_in_bytes));
}

#undef __