1 /*
2 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "asm/assembler.hpp"
29 #include "asm/macroAssembler.inline.hpp"
30 #include "c1/c1_CodeStubs.hpp"
31 #include "c1/c1_Compilation.hpp"
32 #include "c1/c1_LIRAssembler.hpp"
33 #include "c1/c1_MacroAssembler.hpp"
34 #include "c1/c1_Runtime1.hpp"
35 #include "c1/c1_ValueStack.hpp"
36 #include "ci/ciArrayKlass.hpp"
37 #include "ci/ciInstance.hpp"
38 #include "code/compiledIC.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "nativeInst_riscv.hpp"
41 #include "oops/objArrayKlass.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/powerOfTwo.hpp"
45 #include "vmreg_riscv.inline.hpp"
46
47 #ifndef PRODUCT
48 #define COMMENT(x) do { __ block_comment(x); } while (0)
49 #else
50 #define COMMENT(x)
51 #endif
52
53 NEEDS_CLEANUP // remove these definitions?
54 const Register IC_Klass = t1; // where the IC klass is cached
55 const Register SYNC_header = x10; // synchronization header
56 const Register SHIFT_count = x10; // where count for shift operations must be
57
58 #define __ _masm->
59
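// Pick replacement temporaries: if one of the given temps aliases the register
// that must be preserved, substitute 'extra' for it; the final set of registers
// is asserted to be pairwise distinct.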
60 static void select_different_registers(Register preserve,
61 Register extra,
62 Register &tmp1,
63 Register &tmp2) {
64 if (tmp1 == preserve) {
65 assert_different_registers(tmp1, tmp2, extra);
66 tmp1 = extra;
67 } else if (tmp2 == preserve) {
68 assert_different_registers(tmp1, tmp2, extra);
69 tmp2 = extra;
70 }
71 assert_different_registers(preserve, tmp1, tmp2);
72 }
73
74 static void select_different_registers(Register preserve,
75 Register extra,
76 Register &tmp1,
77 Register &tmp2,
78 Register &tmp3) {
79 if (tmp1 == preserve) {
80 assert_different_registers(tmp1, tmp2, tmp3, extra);
81 tmp1 = extra;
82 } else if (tmp2 == preserve) {
83 assert_different_registers(tmp1, tmp2, tmp3, extra);
84 tmp2 = extra;
85 } else if (tmp3 == preserve) {
86 assert_different_registers(tmp1, tmp2, tmp3, extra);
87 tmp3 = extra;
88 }
89 assert_different_registers(preserve, tmp1, tmp2, tmp3);
90 }
91
92 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
93
94 void LIR_Assembler::clinit_barrier(ciMethod* method) {
95 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
96 assert(!method->holder()->is_not_initialized(), "initialization should have been started");
97
98 Label L_skip_barrier;
99
100 __ mov_metadata(t1, method->holder()->constant_encoding());
101 __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
102 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
103 __ bind(L_skip_barrier);
104 }
105
106 LIR_Opr LIR_Assembler::receiverOpr() {
107 return FrameMap::receiver_opr;
108 }
109
110 LIR_Opr LIR_Assembler::osrBufferPointer() {
111 return FrameMap::as_pointer_opr(receiverOpr()->as_register());
112 }
113
114 void LIR_Assembler::breakpoint() { Unimplemented(); }
115
116 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
117
118 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
119
120 static jlong as_long(LIR_Opr data) {
121 jlong result;
122 switch (data->type()) {
123 case T_INT:
124 result = (data->as_jint());
125 break;
126 case T_LONG:
127 result = (data->as_jlong());
128 break;
129 default:
130 ShouldNotReachHere();
131 result = 0; // unreachable
132 }
133 return result;
134 }
135
136 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
137 if (addr->base()->is_illegal()) {
138 assert(addr->index()->is_illegal(), "must be illegal too");
139 __ movptr(tmp, addr->disp());
140 return Address(tmp, 0);
141 }
142
143 Register base = addr->base()->as_pointer_register();
144 LIR_Opr index_opr = addr->index();
145
146 if (index_opr->is_illegal()) {
147 return Address(base, addr->disp());
148 }
149
150 int scale = addr->scale();
151 if (index_opr->is_cpu_register()) {
152 Register index;
153 if (index_opr->is_single_cpu()) {
154 index = index_opr->as_register();
155 } else {
156 index = index_opr->as_register_lo();
157 }
158 if (scale != 0) {
159 __ shadd(tmp, index, base, tmp, scale);
160 } else {
161 __ add(tmp, base, index);
162 }
163 return Address(tmp, addr->disp());
164 } else if (index_opr->is_constant()) {
165 intptr_t addr_offset = (((intptr_t)index_opr->as_constant_ptr()->as_jint()) << scale) + addr->disp();
166 return Address(base, addr_offset);
167 }
168
169 Unimplemented();
170 return Address();
171 }
172
173 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
174 ShouldNotReachHere();
175 return Address();
176 }
177
178 Address LIR_Assembler::as_Address(LIR_Address* addr) {
179 return as_Address(addr, t0);
180 }
181
182 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
183 return as_Address(addr);
184 }
185
186 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is
187 // not encodable as a base + (immediate) offset, generate an explicit address
188 // calculation to hold the address in t0.
189 Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
190 precond(size == 4 || size == 8);
191 Address addr = frame_map()->address_for_slot(index, adjust);
192 precond(addr.getMode() == Address::base_plus_offset);
193 precond(addr.base() == sp);
194 precond(addr.offset() > 0);
195 uint mask = size - 1;
196 assert((addr.offset() & mask) == 0, "scaled offsets only");
197
198 return addr;
199 }
200
201 void LIR_Assembler::osr_entry() {
202 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
203 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
204 guarantee(osr_entry != nullptr, "null osr_entry!");
205 ValueStack* entry_state = osr_entry->state();
206 int number_of_locks = entry_state->locks_size();
207
208 // we jump here if osr happens with the interpreter
209 // state set up to continue at the beginning of the
210 // loop that triggered osr - in particular, we have
211 // the following registers set up:
212 //
213 // x12: osr buffer
214 //
215
216 // build frame
217 ciMethod* m = compilation()->method();
218 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
219
220 // OSR buffer is
221 //
222 // locals[nlocals-1..0]
223 // monitors[0..number_of_locks]
224 //
225 // locals is a direct copy of the interpreter frame, so in the osr buffer
226 // the first slot in the local array is the last local from the interpreter
227 // and the last slot is local[0] (the receiver) from the interpreter
228 //
229 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
230 // from the interpreter frame, and the last lock slot in the osr buffer is the 0th lock
231 // in the interpreter frame (the method lock if a sync method)
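//
// Roughly, the buffer packed by SharedRuntime::OSR_migration_begin() looks like:
//
//   [ locals: max_locals words ][ monitors: number_of_locks x (lock word, oop word) ]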
232
233 // Initialize monitors in the compiled activation.
234 // x12: pointer to osr buffer
235 // All other registers are dead at this point and the locals will be
236 // copied into place by code emitted in the IR.
237
238 Register OSR_buf = osrBufferPointer()->as_pointer_register();
239 {
240 assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
241 int monitor_offset = BytesPerWord * method()->max_locals() +
242 (2 * BytesPerWord) * (number_of_locks - 1);
243 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
244 // the OSR buffer using 2 word entries: first the lock and then
245 // the oop.
246 for (int i = 0; i < number_of_locks; i++) {
247 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
248 #ifdef ASSERT
249 // verify the interpreter's monitor has a non-null object
250 {
251 Label L;
252 __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
253 __ bnez(t0, L);
254 __ stop("locked object is null");
255 __ bind(L);
256 }
257 #endif // ASSERT
258 __ ld(x9, Address(OSR_buf, slot_offset + 0));
259 __ sd(x9, frame_map()->address_for_monitor_lock(i));
260 __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
261 __ sd(x9, frame_map()->address_for_monitor_object(i));
262 }
263 }
264 }
265
266 // inline cache check; done before the frame is built.
267 int LIR_Assembler::check_icache() {
268 Register receiver = FrameMap::receiver_opr->as_register();
269 Register ic_klass = IC_Klass;
270 int start_offset = __ offset();
271 Label dont;
272 __ inline_cache_check(receiver, ic_klass, dont);
273
274 // if icache check fails, then jump to runtime routine
275 // Note: RECEIVER must still contain the receiver!
276 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
277
278 // We align the verified entry point unless the method body
279 // (including its inline cache check) will fit in a single 64-byte
280 // icache line.
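// (4 * 4 bytes: the size budget assumed for the inline cache check itself.)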
281 if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
282 // force alignment after the cache check.
283 __ align(CodeEntryAlignment);
284 }
285
286 __ bind(dont);
287 return start_offset;
288 }
289
290 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
291 if (o == nullptr) {
292 __ mv(reg, zr);
293 } else {
294 __ movoop(reg, o);
295 }
296 }
297
298 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
299 deoptimize_trap(info);
300 }
301
302 // This specifies the sp decrement needed to build the frame
303 int LIR_Assembler::initial_frame_size_in_bytes() const {
304 // if rounding, must let FrameMap know!
305
306 return in_bytes(frame_map()->framesize_in_bytes());
307 }
308
309 int LIR_Assembler::emit_exception_handler() {
310 // generate code for exception handler
311 address handler_base = __ start_a_stub(exception_handler_size());
312 if (handler_base == nullptr) {
313 // not enough space left for the handler
314 bailout("exception handler overflow");
315 return -1;
316 }
317
318 int offset = code_offset();
319
320 // the exception oop and pc are in x10 and x13
321 // no other registers need to be preserved, so invalidate them
322 __ invalidate_registers(false, true, true, false, true, true);
323
324 // check that there is really an exception
325 __ verify_not_null_oop(x10);
326
327 // search an exception handler (x10: exception oop, x13: throwing pc)
328 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
329 __ should_not_reach_here();
330 guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
331 __ end_a_stub();
332
333 return offset;
334 }
335
336 // Emit the code to remove the frame from the stack in the exception
337 // unwind path.
338 int LIR_Assembler::emit_unwind_handler() {
339 #ifndef PRODUCT
340 if (CommentedAssembly) {
341 _masm->block_comment("Unwind handler");
342 }
343 #endif // PRODUCT
344
345 int offset = code_offset();
346
347 // Fetch the exception from TLS and clear out exception related thread state
348 __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
349 __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
350 __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
351
352 __ bind(_unwind_handler_entry);
353 __ verify_not_null_oop(x10);
354 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
355 __ mv(x9, x10); // Preserve the exception
356 }
357
358 // Perform needed unlocking
359 MonitorExitStub* stub = nullptr;
360 if (method()->is_synchronized()) {
361 monitor_address(0, FrameMap::r10_opr);
362 stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
363 if (LockingMode == LM_MONITOR) {
364 __ j(*stub->entry());
365 } else {
366 __ unlock_object(x15, x14, x10, x16, *stub->entry());
367 }
368 __ bind(*stub->continuation());
369 }
370
371 if (compilation()->env()->dtrace_method_probes()) {
372 __ mv(c_rarg0, xthread);
373 __ mov_metadata(c_rarg1, method()->constant_encoding());
374 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
375 }
376
377 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
378 __ mv(x10, x9); // Restore the exception
379 }
380
381 // remove the activation and dispatch to the unwind handler
382 __ block_comment("remove_frame and dispatch to the unwind handler");
383 __ remove_frame(initial_frame_size_in_bytes());
384 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
385
386 // Emit the slow path assembly
387 if (stub != nullptr) {
388 stub->emit_code(this);
389 }
390
391 return offset;
392 }
393
394 int LIR_Assembler::emit_deopt_handler() {
395 // generate code for deopt handler
396 address handler_base = __ start_a_stub(deopt_handler_size());
397 if (handler_base == nullptr) {
398 // not enough space left for the handler
399 bailout("deopt handler overflow");
400 return -1;
401 }
402
403 int offset = code_offset();
404
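// auipc with a zero immediate materializes the current pc in ra before
// jumping to the deopt blob's unpack entry.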
405 __ auipc(ra, 0);
406 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
407 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
408 __ end_a_stub();
409
410 return offset;
411 }
412
413 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
414 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == x10, "word returns are in x10");
415
416 // Pop the stack before the safepoint code
417 __ remove_frame(initial_frame_size_in_bytes());
418
419 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
420 __ reserved_stack_check();
421 }
422
423 code_stub->set_safepoint_offset(__ offset());
424 __ relocate(relocInfo::poll_return_type);
425 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
426 __ ret();
427 }
428
429 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
430 guarantee(info != nullptr, "Shouldn't be null");
431 __ get_polling_page(t0, relocInfo::poll_type);
432 add_debug_info_for_branch(info); // This isn't just debug info:
433 // it's the oop map
434 __ read_polling_page(t0, 0, relocInfo::poll_type);
435 return __ offset();
436 }
437
438 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
439 __ mv(to_reg, from_reg);
440 }
441
442 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
443
444 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
445 assert(src->is_constant(), "should not call otherwise");
446 assert(dest->is_register(), "should not call otherwise");
447 LIR_Const* c = src->as_constant_ptr();
448 address const_addr = nullptr;
449
450 switch (c->type()) {
451 case T_INT:
452 assert(patch_code == lir_patch_none, "no patching handled here");
453 __ mv(dest->as_register(), c->as_jint());
454 break;
455
456 case T_ADDRESS:
457 assert(patch_code == lir_patch_none, "no patching handled here");
458 __ mv(dest->as_register(), c->as_jint());
459 break;
460
461 case T_LONG:
462 assert(patch_code == lir_patch_none, "no patching handled here");
463 __ mv(dest->as_register_lo(), (intptr_t)c->as_jlong());
464 break;
465
466 case T_OBJECT:
467 case T_ARRAY:
468 if (patch_code == lir_patch_none) {
469 jobject2reg(c->as_jobject(), dest->as_register());
470 } else {
471 jobject2reg_with_patching(dest->as_register(), info);
472 }
473 break;
474
475 case T_METADATA:
476 if (patch_code != lir_patch_none) {
477 klass2reg_with_patching(dest->as_register(), info);
478 } else {
479 __ mov_metadata(dest->as_register(), c->as_metadata());
480 }
481 break;
482
483 case T_FLOAT:
484 const_addr = float_constant(c->as_jfloat());
485 assert(const_addr != nullptr, "must create float constant in the constant table");
486 __ flw(dest->as_float_reg(), InternalAddress(const_addr));
487 break;
488
489 case T_DOUBLE:
490 const_addr = double_constant(c->as_jdouble());
491 assert(const_addr != nullptr, "must create double constant in the constant table");
492 __ fld(dest->as_double_reg(), InternalAddress(const_addr));
493 break;
494
495 default:
496 ShouldNotReachHere();
497 }
498 }
499
500 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
501 assert(src->is_constant(), "should not call otherwise");
502 assert(dest->is_stack(), "should not call otherwise");
503 LIR_Const* c = src->as_constant_ptr();
504 switch (c->type()) {
505 case T_OBJECT:
506 if (c->as_jobject() == nullptr) {
507 __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
508 } else {
509 const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
510 reg2stack(FrameMap::t1_opr, dest, c->type(), false);
511 }
512 break;
513 case T_ADDRESS:
514 const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
515 reg2stack(FrameMap::t1_opr, dest, c->type(), false);
break;
516 case T_INT: // fall through
517 case T_FLOAT:
518 if (c->as_jint_bits() == 0) {
519 __ sw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
520 } else {
521 __ mv(t1, c->as_jint_bits());
522 __ sw(t1, frame_map()->address_for_slot(dest->single_stack_ix()));
523 }
524 break;
525 case T_LONG: // fall through
526 case T_DOUBLE:
527 if (c->as_jlong_bits() == 0) {
528 __ sd(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
529 lo_word_offset_in_bytes));
530 } else {
531 __ mv(t1, (intptr_t)c->as_jlong_bits());
532 __ sd(t1, frame_map()->address_for_slot(dest->double_stack_ix(),
533 lo_word_offset_in_bytes));
534 }
535 break;
536 default:
537 ShouldNotReachHere();
538 }
539 }
540
541 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
542 assert(src->is_constant(), "should not call otherwise");
543 assert(dest->is_address(), "should not call otherwise");
544 LIR_Const* c = src->as_constant_ptr();
545 LIR_Address* to_addr = dest->as_address_ptr();
546 void (MacroAssembler::* insn)(Register Rt, const Address &adr, Register temp);
547 switch (type) {
548 case T_ADDRESS:
549 assert(c->as_jint() == 0, "should be");
550 insn = &MacroAssembler::sd; break;
551 case T_LONG:
552 assert(c->as_jlong() == 0, "should be");
553 insn = &MacroAssembler::sd; break;
554 case T_DOUBLE:
555 assert(c->as_jdouble() == 0.0, "should be");
556 insn = &MacroAssembler::sd; break;
557 case T_INT:
558 assert(c->as_jint() == 0, "should be");
559 insn = &MacroAssembler::sw; break;
560 case T_FLOAT:
561 assert(c->as_jfloat() == 0.0f, "should be");
562 insn = &MacroAssembler::sw; break;
563 case T_OBJECT: // fall through
564 case T_ARRAY:
565 assert(c->as_jobject() == 0, "should be");
566 if (UseCompressedOops && !wide) {
567 insn = &MacroAssembler::sw;
568 } else {
569 insn = &MacroAssembler::sd;
570 }
571 break;
572 case T_CHAR: // fall through
573 case T_SHORT:
574 assert(c->as_jint() == 0, "should be");
575 insn = &MacroAssembler::sh;
576 break;
577 case T_BOOLEAN: // fall through
578 case T_BYTE:
579 assert(c->as_jint() == 0, "should be");
580 insn = &MacroAssembler::sb; break;
581 default:
582 ShouldNotReachHere();
583 insn = &MacroAssembler::sd; // unreachable
584 }
585 if (info != nullptr) {
586 add_debug_info_for_null_check_here(info);
587 }
588 (_masm->*insn)(zr, as_Address(to_addr), t0);
589 }
590
591 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
592 assert(src->is_register(), "should not call otherwise");
593 assert(dest->is_register(), "should not call otherwise");
594
595 // move between cpu-registers
596 if (dest->is_single_cpu()) {
597 if (src->type() == T_LONG) {
598 // Can do LONG -> OBJECT
599 move_regs(src->as_register_lo(), dest->as_register());
600 return;
601 }
602 assert(src->is_single_cpu(), "must match");
603 if (src->type() == T_OBJECT) {
604 __ verify_oop(src->as_register());
605 }
606 move_regs(src->as_register(), dest->as_register());
607 } else if (dest->is_double_cpu()) {
608 if (is_reference_type(src->type())) {
609 __ verify_oop(src->as_register());
610 move_regs(src->as_register(), dest->as_register_lo());
611 return;
612 }
613 assert(src->is_double_cpu(), "must match");
614 Register f_lo = src->as_register_lo();
615 Register f_hi = src->as_register_hi();
616 Register t_lo = dest->as_register_lo();
617 Register t_hi = dest->as_register_hi();
618 assert(f_hi == f_lo, "must be same");
619 assert(t_hi == t_lo, "must be same");
620 move_regs(f_lo, t_lo);
621 } else if (dest->is_single_fpu()) {
622 assert(src->is_single_fpu(), "expect single fpu");
623 __ fmv_s(dest->as_float_reg(), src->as_float_reg());
624 } else if (dest->is_double_fpu()) {
625 assert(src->is_double_fpu(), "expect double fpu");
626 __ fmv_d(dest->as_double_reg(), src->as_double_reg());
627 } else {
628 ShouldNotReachHere();
629 }
630 }
631
632 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
633 precond(src->is_register() && dest->is_stack());
634
635 uint const c_sz32 = sizeof(uint32_t);
636 uint const c_sz64 = sizeof(uint64_t);
637
638 assert(src->is_register(), "should not call otherwise");
639 assert(dest->is_stack(), "should not call otherwise");
640 if (src->is_single_cpu()) {
641 int index = dest->single_stack_ix();
642 if (is_reference_type(type)) {
643 __ sd(src->as_register(), stack_slot_address(index, c_sz64));
644 __ verify_oop(src->as_register());
645 } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
646 __ sd(src->as_register(), stack_slot_address(index, c_sz64));
647 } else {
648 __ sw(src->as_register(), stack_slot_address(index, c_sz32));
649 }
650 } else if (src->is_double_cpu()) {
651 int index = dest->double_stack_ix();
652 Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
653 __ sd(src->as_register_lo(), dest_addr_LO);
654 } else if (src->is_single_fpu()) {
655 int index = dest->single_stack_ix();
656 __ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32));
657 } else if (src->is_double_fpu()) {
658 int index = dest->double_stack_ix();
659 __ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64));
660 } else {
661 ShouldNotReachHere();
662 }
663 }
664
665 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
666 LIR_Address* to_addr = dest->as_address_ptr();
667 // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src
668 Register compressed_src = t1;
669
670 if (patch_code != lir_patch_none) {
671 deoptimize_trap(info);
672 return;
673 }
674
675 if (is_reference_type(type)) {
676 __ verify_oop(src->as_register());
677
678 if (UseCompressedOops && !wide) {
679 __ encode_heap_oop(compressed_src, src->as_register());
680 } else {
681 compressed_src = src->as_register();
682 }
683 }
684
685 int null_check_here = code_offset();
686
687 switch (type) {
688 case T_FLOAT:
689 __ fsw(src->as_float_reg(), as_Address(to_addr));
690 break;
691
692 case T_DOUBLE:
693 __ fsd(src->as_double_reg(), as_Address(to_addr));
694 break;
695
696 case T_ARRAY: // fall through
697 case T_OBJECT:
698 if (UseCompressedOops && !wide) {
699 __ sw(compressed_src, as_Address(to_addr));
700 } else {
701 __ sd(compressed_src, as_Address(to_addr));
702 }
703 break;
704 case T_METADATA:
705 // We get here to store a method pointer to the stack to pass to
706 // a dtrace runtime call. This can't work on 64 bit with
707 // compressed klass ptrs: T_METADATA can be compressed klass
708 // ptr or a 64 bit method pointer.
709 ShouldNotReachHere();
710 __ sd(src->as_register(), as_Address(to_addr));
711 break;
712 case T_ADDRESS:
713 __ sd(src->as_register(), as_Address(to_addr));
714 break;
715 case T_INT:
716 __ sw(src->as_register(), as_Address(to_addr));
717 break;
718 case T_LONG:
719 __ sd(src->as_register_lo(), as_Address(to_addr));
720 break;
721 case T_BYTE: // fall through
722 case T_BOOLEAN:
723 __ sb(src->as_register(), as_Address(to_addr));
724 break;
725 case T_CHAR: // fall through
726 case T_SHORT:
727 __ sh(src->as_register(), as_Address(to_addr));
728 break;
729 default:
730 ShouldNotReachHere();
731 }
732
733 if (info != nullptr) {
734 add_debug_info_for_null_check(null_check_here, info);
735 }
736 }
737
738 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
739 precond(src->is_stack() && dest->is_register());
740
741 uint const c_sz32 = sizeof(uint32_t);
742 uint const c_sz64 = sizeof(uint64_t);
743
744 if (dest->is_single_cpu()) {
745 int index = src->single_stack_ix();
746 if (type == T_INT) {
747 __ lw(dest->as_register(), stack_slot_address(index, c_sz32));
748 } else if (is_reference_type(type)) {
749 __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
750 __ verify_oop(dest->as_register());
751 } else if (type == T_METADATA || type == T_ADDRESS) {
752 __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
753 } else {
754 __ lwu(dest->as_register(), stack_slot_address(index, c_sz32));
755 }
756 } else if (dest->is_double_cpu()) {
757 int index = src->double_stack_ix();
758 Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
759 __ ld(dest->as_register_lo(), src_addr_LO);
760 } else if (dest->is_single_fpu()) {
761 int index = src->single_stack_ix();
762 __ flw(dest->as_float_reg(), stack_slot_address(index, c_sz32));
763 } else if (dest->is_double_fpu()) {
764 int index = src->double_stack_ix();
765 __ fld(dest->as_double_reg(), stack_slot_address(index, c_sz64));
766 } else {
767 ShouldNotReachHere();
768 }
769 }
770
771 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
772 deoptimize_trap(info);
773 }
774
775 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
776 LIR_Opr temp;
777 if (type == T_LONG || type == T_DOUBLE) {
778 temp = FrameMap::t1_long_opr;
779 } else {
780 temp = FrameMap::t1_opr;
781 }
782
783 stack2reg(src, temp, src->type());
784 reg2stack(temp, dest, dest->type(), false);
785 }
786
787 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
788 assert(src->is_address(), "should not call otherwise");
789 assert(dest->is_register(), "should not call otherwise");
790
791 LIR_Address* addr = src->as_address_ptr();
792 LIR_Address* from_addr = src->as_address_ptr();
793
794 if (addr->base()->type() == T_OBJECT) {
795 __ verify_oop(addr->base()->as_pointer_register());
796 }
797
798 if (patch_code != lir_patch_none) {
799 deoptimize_trap(info);
800 return;
801 }
802
803 if (info != nullptr) {
804 add_debug_info_for_null_check_here(info);
805 }
806
807 int null_check_here = code_offset();
808 switch (type) {
809 case T_FLOAT:
810 __ flw(dest->as_float_reg(), as_Address(from_addr));
811 break;
812 case T_DOUBLE:
813 __ fld(dest->as_double_reg(), as_Address(from_addr));
814 break;
815 case T_ARRAY: // fall through
816 case T_OBJECT:
817 if (UseCompressedOops && !wide) {
818 __ lwu(dest->as_register(), as_Address(from_addr));
819 } else {
820 __ ld(dest->as_register(), as_Address(from_addr));
821 }
822 break;
823 case T_METADATA:
824 // We get here to store a method pointer to the stack to pass to
825 // a dtrace runtime call. This can't work on 64 bit with
826 // compressed klass ptrs: T_METADATA can be a compressed klass
827 // ptr or a 64 bit method pointer.
828 ShouldNotReachHere();
829 __ ld(dest->as_register(), as_Address(from_addr));
830 break;
831 case T_ADDRESS:
832 __ ld(dest->as_register(), as_Address(from_addr));
833 break;
834 case T_INT:
835 __ lw(dest->as_register(), as_Address(from_addr));
836 break;
837 case T_LONG:
838 __ ld(dest->as_register_lo(), as_Address_lo(from_addr));
839 break;
840 case T_BYTE:
841 __ lb(dest->as_register(), as_Address(from_addr));
842 break;
843 case T_BOOLEAN:
844 __ lbu(dest->as_register(), as_Address(from_addr));
845 break;
846 case T_CHAR:
847 __ lhu(dest->as_register(), as_Address(from_addr));
848 break;
849 case T_SHORT:
850 __ lh(dest->as_register(), as_Address(from_addr));
851 break;
852 default:
853 ShouldNotReachHere();
854 }
855
856 if (is_reference_type(type)) {
857 if (UseCompressedOops && !wide) {
858 __ decode_heap_oop(dest->as_register());
859 }
860
861 if (!(UseZGC && !ZGenerational)) {
862 // Load barrier has not yet been applied, so ZGC can't verify the oop here
863 __ verify_oop(dest->as_register());
864 }
865 }
866 }
867
868 void LIR_Assembler::emit_op3(LIR_Op3* op) {
869 switch (op->code()) {
870 case lir_idiv: // fall through
871 case lir_irem:
872 arithmetic_idiv(op->code(),
873 op->in_opr1(),
874 op->in_opr2(),
875 op->in_opr3(),
876 op->result_opr(),
877 op->info());
878 break;
879 case lir_fmad:
880 __ fmadd_d(op->result_opr()->as_double_reg(),
881 op->in_opr1()->as_double_reg(),
882 op->in_opr2()->as_double_reg(),
883 op->in_opr3()->as_double_reg());
884 break;
885 case lir_fmaf:
886 __ fmadd_s(op->result_opr()->as_float_reg(),
887 op->in_opr1()->as_float_reg(),
888 op->in_opr2()->as_float_reg(),
889 op->in_opr3()->as_float_reg());
890 break;
891 default:
892 ShouldNotReachHere();
893 }
894 }
895
896 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
897 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
898 Label label;
899
900 emit_branch(condition, cmp_opr1, cmp_opr2, label, /* is_far */ false,
901 /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? false : true);
902
903 Label done;
904 move_op(opr2, result, type, lir_patch_none, nullptr,
905 false, // pop_fpu_stack
906 false); // wide
907 __ j(done);
908 __ bind(label);
909 move_op(opr1, result, type, lir_patch_none, nullptr,
910 false, // pop_fpu_stack
911 false); // wide
912 __ bind(done);
913 }
914
915 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
916 LIR_Condition condition = op->cond();
917 if (condition == lir_cond_always) {
918 if (op->info() != nullptr) {
919 add_debug_info_for_branch(op->info());
920 }
921 } else {
922 assert(op->in_opr1() != LIR_OprFact::illegalOpr && op->in_opr2() != LIR_OprFact::illegalOpr, "conditional branches must have legal operands");
923 }
924 bool is_unordered = (op->ublock() == op->block());
925 emit_branch(condition, op->in_opr1(), op->in_opr2(), *op->label(), /* is_far */ true, is_unordered);
926 }
927
928 void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label,
929 bool is_far, bool is_unordered) {
930
931 if (cmp_flag == lir_cond_always) {
932 __ j(label);
933 return;
934 }
935
936 if (cmp1->is_cpu_register()) {
937 Register reg1 = as_reg(cmp1);
938 if (cmp2->is_cpu_register()) {
939 Register reg2 = as_reg(cmp2);
940 __ c1_cmp_branch(cmp_flag, reg1, reg2, label, cmp1->type(), is_far);
941 } else if (cmp2->is_constant()) {
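// materialize the constant operand into the scratch register t0 and branch on it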
942 const2reg_helper(cmp2);
943 __ c1_cmp_branch(cmp_flag, reg1, t0, label, cmp2->type(), is_far);
944 } else {
945 ShouldNotReachHere();
946 }
947 } else if (cmp1->is_single_fpu()) {
948 assert(cmp2->is_single_fpu(), "expect single float register");
949 __ c1_float_cmp_branch(cmp_flag, cmp1->as_float_reg(), cmp2->as_float_reg(), label, is_far, is_unordered);
950 } else if (cmp1->is_double_fpu()) {
951 assert(cmp2->is_double_fpu(), "expect double float register");
952 __ c1_float_cmp_branch(cmp_flag | C1_MacroAssembler::c1_double_branch_mask,
953 cmp1->as_double_reg(), cmp2->as_double_reg(), label, is_far, is_unordered);
954 } else {
955 ShouldNotReachHere();
956 }
957 }
958
959 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
960 LIR_Opr src = op->in_opr();
961 LIR_Opr dest = op->result_opr();
962
963 switch (op->bytecode()) {
964 case Bytecodes::_i2f:
965 __ fcvt_s_w(dest->as_float_reg(), src->as_register()); break;
966 case Bytecodes::_i2d:
967 __ fcvt_d_w(dest->as_double_reg(), src->as_register()); break;
968 case Bytecodes::_l2d:
969 __ fcvt_d_l(dest->as_double_reg(), src->as_register_lo()); break;
970 case Bytecodes::_l2f:
971 __ fcvt_s_l(dest->as_float_reg(), src->as_register_lo()); break;
972 case Bytecodes::_f2d:
973 __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); break;
974 case Bytecodes::_d2f:
975 __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); break;
976 case Bytecodes::_i2c:
977 __ zero_extend(dest->as_register(), src->as_register(), 16); break;
978 case Bytecodes::_i2l:
979 __ sign_extend(dest->as_register_lo(), src->as_register(), 32); break;
980 case Bytecodes::_i2s:
981 __ sign_extend(dest->as_register(), src->as_register(), 16); break;
982 case Bytecodes::_i2b:
983 __ sign_extend(dest->as_register(), src->as_register(), 8); break;
984 case Bytecodes::_l2i:
985 __ sign_extend(dest->as_register(), src->as_register_lo(), 32); break;
986 case Bytecodes::_d2l:
987 __ fcvt_l_d_safe(dest->as_register_lo(), src->as_double_reg()); break;
988 case Bytecodes::_f2i:
989 __ fcvt_w_s_safe(dest->as_register(), src->as_float_reg()); break;
990 case Bytecodes::_f2l:
991 __ fcvt_l_s_safe(dest->as_register_lo(), src->as_float_reg()); break;
992 case Bytecodes::_d2i:
993 __ fcvt_w_d_safe(dest->as_register(), src->as_double_reg()); break;
994 default:
995 ShouldNotReachHere();
996 }
997 }
998
999 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1000 if (op->init_check()) {
1001 __ lbu(t0, Address(op->klass()->as_register(),
1002 InstanceKlass::init_state_offset()));
1003 __ mv(t1, (u1)InstanceKlass::fully_initialized);
1004 add_debug_info_for_null_check_here(op->stub()->info());
1005 __ bne(t0, t1, *op->stub()->entry(), /* is_far */ true);
1006 }
1007
1008 __ allocate_object(op->obj()->as_register(),
1009 op->tmp1()->as_register(),
1010 op->tmp2()->as_register(),
1011 op->header_size(),
1012 op->object_size(),
1013 op->klass()->as_register(),
1014 *op->stub()->entry());
1015
1016 __ bind(*op->stub()->continuation());
1017 }
1018
1019 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1020 Register len = op->len()->as_register();
1021
1022 if (UseSlowPath ||
1023 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1024 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1025 __ j(*op->stub()->entry());
1026 } else {
1027 Register tmp1 = op->tmp1()->as_register();
1028 Register tmp2 = op->tmp2()->as_register();
1029 Register tmp3 = op->tmp3()->as_register();
1030 if (len == tmp1) {
1031 tmp1 = tmp3;
1032 } else if (len == tmp2) {
1033 tmp2 = tmp3;
1034 } else if (len == tmp3) {
1035 // everything is ok
1036 } else {
1037 __ mv(tmp3, len);
1038 }
1039 __ allocate_array(op->obj()->as_register(),
1040 len,
1041 tmp1,
1042 tmp2,
1043 arrayOopDesc::header_size(op->type()),
1044 array_element_size(op->type()),
1045 op->klass()->as_register(),
1046 *op->stub()->entry());
1047 }
1048 __ bind(*op->stub()->continuation());
1049 }
1050
1051 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
1052 Register recv, Label* update_done) {
1053 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1054 Label next_test;
1055 // See if the receiver is receiver[n].
1056 __ ld(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1057 __ bne(recv, t1, next_test);
1058 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1059 __ increment(data_addr, DataLayout::counter_increment);
1060 __ j(*update_done);
1061 __ bind(next_test);
1062 }
1063
1064 // Didn't find receiver; find next empty slot and fill it in
1065 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1066 Label next_test;
1067 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
1068 __ ld(t1, recv_addr);
1069 __ bnez(t1, next_test);
1070 __ sd(recv, recv_addr);
1071 __ mv(t1, DataLayout::counter_increment);
1072 __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
1073 __ j(*update_done);
1074 __ bind(next_test);
1075 }
1076 }
1077
1078 void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
1079 ciMethod* method = op->profiled_method();
1080 assert(method != nullptr, "Should have method");
1081 int bci = op->profiled_bci();
1082 *md = method->method_data_or_null();
1083 guarantee(*md != nullptr, "Sanity");
1084 *data = ((*md)->bci_to_data(bci));
1085 assert(*data != nullptr, "need data for type check");
1086 assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1087 }
1088
1089 void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1,
1090 Register k_RInfo, Register klass_RInfo,
1091 Label *failure_target, Label *success_target) {
1092 // get object class
1093 // not a safepoint as obj null check happens earlier
1094 __ load_klass(klass_RInfo, obj);
1095 if (k->is_loaded()) {
1096 // See if we get an immediate positive hit
1097 __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset())));
1098 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1099 __ bne(k_RInfo, t0, *failure_target, /* is_far */ true);
1100 // successful cast, fall through to profile or jump
1101 } else {
1102 // See if we get an immediate positive hit
1103 __ beq(k_RInfo, t0, *success_target);
1104 // check for self
1105 __ beq(klass_RInfo, k_RInfo, *success_target);
1106
1107 __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
1108 __ sd(k_RInfo, Address(sp, 0)); // sub klass
1109 __ sd(klass_RInfo, Address(sp, wordSize)); // super klass
1110 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1111 // load result to k_RInfo
1112 __ ld(k_RInfo, Address(sp, 0));
1113 __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
1114 // result is a boolean
1115 __ beqz(k_RInfo, *failure_target, /* is_far */ true);
1116 // successful cast, fall through to profile or jump
1117 }
1118 } else {
1119 // perform the fast part of the checking logic
1120 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1121 // call out-of-line instance of __ check_klass_subtype_slow_path(...)
1122 __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
1123 __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
1124 __ sd(k_RInfo, Address(sp, 0)); // super klass
1125 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1126 // load result to k_RInfo
1127 __ ld(k_RInfo, Address(sp, 0));
1128 __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
1129 // result is a boolean
1130 __ beqz(k_RInfo, *failure_target, /* is_far */ true);
1131 // successful cast, fall through to profile or jump
1132 }
1133 }
1134
1135 void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Register obj,
1136 Register klass_RInfo, Label* obj_is_null) {
1137 Label not_null;
1138 __ bnez(obj, not_null);
1139 // Object is null, update MDO and exit
1140 Register mdo = klass_RInfo;
1141 __ mov_metadata(mdo, md->constant_encoding());
1142 Address data_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1143 __ lbu(t0, data_addr);
1144 __ ori(t0, t0, BitData::null_seen_byte_constant());
1145 __ sb(t0, data_addr);
1146 __ j(*obj_is_null);
1147 __ bind(not_null);
1148 }
1149
1150 void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) {
1151 if (!k->is_loaded()) {
1152 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1153 } else {
1154 __ mov_metadata(k_RInfo, k->constant_encoding());
1155 }
1156 }
1157
1158 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1159 Register obj = op->object()->as_register();
1160 Register k_RInfo = op->tmp1()->as_register();
1161 Register klass_RInfo = op->tmp2()->as_register();
1162 Register dst = op->result_opr()->as_register();
1163 ciKlass* k = op->klass();
1164 Register Rtmp1 = noreg;
1165
1166 // check if it needs to be profiled
1167 ciMethodData* md = nullptr;
1168 ciProfileData* data = nullptr;
1169
1170 const bool should_profile = op->should_profile();
1171 if (should_profile) {
1172 data_check(op, &md, &data);
1173 }
1174 Label profile_cast_success, profile_cast_failure;
1175 Label *success_target = should_profile ? &profile_cast_success : success;
1176 Label *failure_target = should_profile ? &profile_cast_failure : failure;
1177
1178 if (obj == k_RInfo) {
1179 k_RInfo = dst;
1180 } else if (obj == klass_RInfo) {
1181 klass_RInfo = dst;
1182 }
1183 if (k->is_loaded() && !UseCompressedClassPointers) {
1184 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1185 } else {
1186 Rtmp1 = op->tmp3()->as_register();
1187 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1188 }
1189
1190 assert_different_registers(obj, k_RInfo, klass_RInfo);
1191
1192 if (should_profile) {
1193 profile_object(md, data, obj, klass_RInfo, obj_is_null);
1194 } else {
1195 __ beqz(obj, *obj_is_null);
1196 }
1197
1198 typecheck_loaded(op, k, k_RInfo);
1199 __ verify_oop(obj);
1200
1201 if (op->fast_check()) {
1202 // get object class
1203 // not a safepoint as obj null check happens earlier
1204 __ load_klass(t0, obj, t1);
1205 __ bne(t0, k_RInfo, *failure_target, /* is_far */ true);
1206 // successful cast, fall through to profile or jump
1207 } else {
1208 typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target);
1209 }
1210 if (should_profile) {
1211 type_profile(obj, md, klass_RInfo, k_RInfo, data, success, failure, profile_cast_success, profile_cast_failure);
1212 }
1213 __ j(*success);
1214 }
1215
1216 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1217 const bool should_profile = op->should_profile();
1218
1219 LIR_Code code = op->code();
1220 if (code == lir_store_check) {
1221 typecheck_lir_store(op, should_profile);
1222 } else if (code == lir_checkcast) {
1223 Register obj = op->object()->as_register();
1224 Register dst = op->result_opr()->as_register();
1225 Label success;
1226 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1227 __ bind(success);
1228 if (dst != obj) {
1229 __ mv(dst, obj);
1230 }
1231 } else if (code == lir_instanceof) {
1232 Register obj = op->object()->as_register();
1233 Register dst = op->result_opr()->as_register();
1234 Label success, failure, done;
1235 emit_typecheck_helper(op, &success, &failure, &failure);
1236 __ bind(failure);
1237 __ mv(dst, zr);
1238 __ j(done);
1239 __ bind(success);
1240 __ mv(dst, 1);
1241 __ bind(done);
1242 } else {
1243 ShouldNotReachHere();
1244 }
1245 }
1246
1247 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1248 assert(VM_Version::supports_cx8(), "wrong machine");
1249 Register addr;
1250 if (op->addr()->is_register()) {
1251 addr = as_reg(op->addr());
1252 } else {
1253 assert(op->addr()->is_address(), "what else?");
1254 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1255 assert(addr_ptr->disp() == 0, "need 0 disp");
1256 assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1257 addr = as_reg(addr_ptr->base());
1258 }
1259 Register newval = as_reg(op->new_value());
1260 Register cmpval = as_reg(op->cmp_value());
1261
1262 if (op->code() == lir_cas_obj) {
1263 if (UseCompressedOops) {
1264 Register tmp1 = op->tmp1()->as_register();
1265 assert(op->tmp1()->is_valid(), "must be");
1266 Register tmp2 = op->tmp2()->as_register();
1267 assert(op->tmp2()->is_valid(), "must be");
1268
1269 __ encode_heap_oop(tmp1, cmpval);
1270 cmpval = tmp1;
1271 __ encode_heap_oop(tmp2, newval);
1272 newval = tmp2;
1273 caswu(addr, newval, cmpval);
1274 } else {
1275 casl(addr, newval, cmpval);
1276 }
1277 } else if (op->code() == lir_cas_int) {
1278 casw(addr, newval, cmpval);
1279 } else {
1280 casl(addr, newval, cmpval);
1281 }
1282
1283 if (op->result_opr()->is_valid()) {
1284 assert(op->result_opr()->is_register(), "need a register");
1285 __ mv(as_reg(op->result_opr()), t0); // cas result in t0, and 0 for success
1286 }
1287 }
1288
1289 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1290 switch (code) {
1291 case lir_abs: __ fabs_d(dest->as_double_reg(), value->as_double_reg()); break;
1292 case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
1293 default: ShouldNotReachHere();
1294 }
1295 }
1296
1297 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1298 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1299 Register Rleft = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1300 if (dst->is_single_cpu()) {
1301 Register Rdst = dst->as_register();
1302 if (right->is_constant()) {
1303 int right_const = right->as_jint();
1304 if (Assembler::is_simm12(right_const)) {
1305 logic_op_imm(Rdst, Rleft, right_const, code);
1306 __ sign_extend(Rdst, Rdst, 32);
1307 } else {
1308 __ mv(t0, right_const);
1309 logic_op_reg32(Rdst, Rleft, t0, code);
1310 }
1311 } else {
1312 Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
1313 logic_op_reg32(Rdst, Rleft, Rright, code);
1314 }
1315 } else {
1316 Register Rdst = dst->as_register_lo();
1317 if (right->is_constant()) {
1318 long right_const = right->as_jlong();
1319 if (Assembler::is_simm12(right_const)) {
1320 logic_op_imm(Rdst, Rleft, right_const, code);
1321 } else {
1322 __ mv(t0, right_const);
1323 logic_op_reg(Rdst, Rleft, t0, code);
1324 }
1325 } else {
1326 Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
1327 logic_op_reg(Rdst, Rleft, Rright, code);
1328 }
1329 }
1330 }
1331
1332 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op) {
1333 ShouldNotCallThis();
1334 }
1335
1336 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
1337 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1338 bool is_unordered_less = (code == lir_ucmp_fd2i);
1339 if (left->is_single_fpu()) {
1340 __ float_cmp(true, is_unordered_less ? -1 : 1,
1341 left->as_float_reg(), right->as_float_reg(), dst->as_register());
1342 } else if (left->is_double_fpu()) {
1343 __ float_cmp(false, is_unordered_less ? -1 : 1,
1344 left->as_double_reg(), right->as_double_reg(), dst->as_register());
1345 } else {
1346 ShouldNotReachHere();
1347 }
1348 } else if (code == lir_cmp_l2i) {
1349 __ cmp_l2i(dst->as_register(), left->as_register_lo(), right->as_register_lo());
1350 } else {
1351 ShouldNotReachHere();
1352 }
1353 }
1354
1355 void LIR_Assembler::align_call(LIR_Code code) {
1356 // With RVC a call instruction may get 2-byte aligned.
1357 // The address of the call instruction needs to be 4-byte aligned to
1358 // ensure that it does not span a cache line so that it can be patched.
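// (NativeInstruction::instruction_size is the 4-byte base instruction size,
// which gives exactly that alignment.)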
1359 __ align(NativeInstruction::instruction_size);
1360 }
1361
1362 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1363 address call = __ trampoline_call(Address(op->addr(), rtype));
1364 if (call == nullptr) {
1365 bailout("trampoline stub overflow");
1366 return;
1367 }
1368 add_call_info(code_offset(), op->info());
1369 __ post_call_nop();
1370 }
1371
1372 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1373 address call = __ ic_call(op->addr());
1374 if (call == nullptr) {
1375 bailout("trampoline stub overflow");
1376 return;
1377 }
1378 add_call_info(code_offset(), op->info());
1379 __ post_call_nop();
1380 }
1381
1382 void LIR_Assembler::emit_static_call_stub() {
1383 address call_pc = __ pc();
1384 MacroAssembler::assert_alignment(call_pc);
1385 address stub = __ start_a_stub(call_stub_size());
1386 if (stub == nullptr) {
1387 bailout("static call stub overflow");
1388 return;
1389 }
1390
1391 int start = __ offset();
1392
1393 __ relocate(static_stub_Relocation::spec(call_pc));
1394 __ emit_static_call_stub();
1395
1396 assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
1397 <= call_stub_size(), "stub too big");
1398 __ end_a_stub();
1399 }
1400
1401 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1402 assert(exceptionOop->as_register() == x10, "must match");
1403 assert(exceptionPC->as_register() == x13, "must match");
1404
1405 // exception object is not added to oop map by LinearScan
1406 // (LinearScan assumes that no oops are in fixed registers)
1407 info->add_register_oop(exceptionOop);
1408 Runtime1::StubID unwind_id;
1409
1410 // get current pc information
1411 // pc is only needed if the method has an exception handler, the unwind code does not need it.
1412 if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
1413 // As no instructions have been generated yet for this LIR node it's
1414 // possible that an oop map already exists for the current offset.
1415 // In that case insert a dummy NOP here to ensure all oop map PCs
1416 // are unique. See JDK-8237483.
1417 __ nop();
1418 }
1419 int pc_for_athrow_offset = __ offset();
1420 InternalAddress pc_for_athrow(__ pc());
1421 __ relocate(pc_for_athrow.rspec(), [&] {
1422 int32_t offset;
1423 __ la_patchable(exceptionPC->as_register(), pc_for_athrow, offset);
1424 __ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset);
1425 });
1426 add_call_info(pc_for_athrow_offset, info); // for exception handler
1427
1428 __ verify_not_null_oop(x10);
1429 // search an exception handler (x10: exception oop, x13: throwing pc)
1430 if (compilation()->has_fpu_code()) {
1431 unwind_id = Runtime1::handle_exception_id;
1432 } else {
1433 unwind_id = Runtime1::handle_exception_nofpu_id;
1434 }
1435 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
1436 __ nop();
1437 }
1438
1439 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1440 assert(exceptionOop->as_register() == x10, "must match");
1441 __ j(_unwind_handler_entry);
1442 }
1443
1444 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
1445 Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1446 Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
1447 Register count_reg = count->as_register();
1448 if (dest->is_single_cpu()) {
1449 assert (dest->type() == T_INT, "unexpected result type");
1450 assert (left->type() == T_INT, "unexpected left type");
1451 __ andi(t0, count_reg, 31); // should not shift more than 31 bits
1452 switch (code) {
1453 case lir_shl: __ sllw(dest_reg, left_reg, t0); break;
1454 case lir_shr: __ sraw(dest_reg, left_reg, t0); break;
1455 case lir_ushr: __ srlw(dest_reg, left_reg, t0); break;
1456 default: ShouldNotReachHere();
1457 }
1458 } else if (dest->is_double_cpu()) {
1459 __ andi(t0, count_reg, 63); // should not shift more than 63 bits
1460 switch (code) {
1461 case lir_shl: __ sll(dest_reg, left_reg, t0); break;
1462 case lir_shr: __ sra(dest_reg, left_reg, t0); break;
1463 case lir_ushr: __ srl(dest_reg, left_reg, t0); break;
1464 default: ShouldNotReachHere();
1465 }
1466 } else {
1467 ShouldNotReachHere();
1468 }
1469 }
1470
1471 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
1472 Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1473 Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
1474 if (dest->is_single_cpu()) {
1475 assert (dest->type() == T_INT, "unexpected result type");
1476 assert (left->type() == T_INT, "unexpected left type");
1477 count &= 0x1f;
1478 if (count != 0) {
1479 switch (code) {
1480 case lir_shl: __ slliw(dest_reg, left_reg, count); break;
1481 case lir_shr: __ sraiw(dest_reg, left_reg, count); break;
1482 case lir_ushr: __ srliw(dest_reg, left_reg, count); break;
1483 default: ShouldNotReachHere();
1484 }
1485 } else {
1486 move_regs(left_reg, dest_reg);
1487 }
1488 } else if (dest->is_double_cpu()) {
1489 count &= 0x3f;
1490 if (count != 0) {
1491 switch (code) {
1492 case lir_shl: __ slli(dest_reg, left_reg, count); break;
1493 case lir_shr: __ srai(dest_reg, left_reg, count); break;
1494 case lir_ushr: __ srli(dest_reg, left_reg, count); break;
1495 default: ShouldNotReachHere();
1496 }
1497 } else {
1498 move_regs(left->as_register_lo(), dest->as_register_lo());
1499 }
1500 } else {
1501 ShouldNotReachHere();
1502 }
1503 }
1504
1505 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
1506 Register obj = op->obj_opr()->as_register(); // may not be an oop
1507 Register hdr = op->hdr_opr()->as_register();
1508 Register lock = op->lock_opr()->as_register();
1509 Register temp = op->scratch_opr()->as_register();
1510 if (LockingMode == LM_MONITOR) {
1511 if (op->info() != nullptr) {
1512 add_debug_info_for_null_check_here(op->info());
1513 __ null_check(obj, -1);
1514 }
1515 __ j(*op->stub()->entry());
1516 } else if (op->code() == lir_lock) {
1517 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
1518 // add debug info for NullPointerException only if one is possible
1519 int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
1520 if (op->info() != nullptr) {
1521 add_debug_info_for_null_check(null_check_offset, op->info());
1522 }
1523 } else if (op->code() == lir_unlock) {
1524 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
1525 __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
1526 } else {
1527 Unimplemented();
1528 }
1529 __ bind(*op->stub()->continuation());
1530 }
1531
1532 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
1533 Register obj = op->obj()->as_pointer_register();
1534 Register result = op->result_opr()->as_pointer_register();
1535
1536 CodeEmitInfo* info = op->info();
1537 if (info != nullptr) {
1538 add_debug_info_for_null_check_here(info);
1539 }
1540
1541 if (UseCompressedClassPointers) {
1542 __ lwu(result, Address(obj, oopDesc::klass_offset_in_bytes()));
1543 __ decode_klass_not_null(result);
1544 } else {
1545 __ ld(result, Address(obj, oopDesc::klass_offset_in_bytes()));
1546 }
1547 }
1548
1549 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
1550 ciMethod* method = op->profiled_method();
1551 int bci = op->profiled_bci();
1552
1553 // Update counter for all call types
1554 ciMethodData* md = method->method_data_or_null();
1555 guarantee(md != nullptr, "Sanity");
1556 ciProfileData* data = md->bci_to_data(bci);
1557 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
1558 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
1559 Register mdo = op->mdo()->as_register();
1560 __ mov_metadata(mdo, md->constant_encoding());
1561 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1562 // Perform additional virtual call profiling for invokevirtual and
1563 // invokeinterface bytecodes
1564 if (op->should_profile_receiver_type()) {
1565 assert(op->recv()->is_single_cpu(), "recv must be allocated");
1566 Register recv = op->recv()->as_register();
1567 assert_different_registers(mdo, recv);
1568 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
1569 ciKlass* known_klass = op->known_holder();
1570 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
1571 // We know the type that will be seen at this call site; we can
1572 // statically update the MethodData* rather than needing to do
1573 // dynamic tests on the receiver type
1574 // NOTE: we should probably put a lock around this search to
1575 // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ increment(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.
      // Note that this is less efficient than it could be because the
      // generated code always writes the receiver slot of the
      // VirtualCallData, rather than only the first time it is claimed.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == nullptr) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(t1, known_klass->constant_encoding());
          __ sd(t1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ increment(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ increment(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ increment(counter_addr, DataLayout::counter_increment);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); }

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }

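// Type-profiling helper for the case where the type recorded at mdo_addr may
// conflict with the type observed here: compare the observed klass (or the
// statically known exact_klass) with the recorded value and, on a mismatch,
// set the type_unknown bit. If the slot holds no type yet, the observed type
// is installed instead (the 'none' path).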
void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
                                   Register tmp, Label &next, Label &none,
                                   Address mdo_addr) {
  if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
    if (exact_klass != nullptr) {
      __ mov_metadata(tmp, exact_klass->constant_encoding());
    } else {
      __ load_klass(tmp, tmp);
    }

    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    // klass seen before, nothing to do. The unknown bit may have been
    // set already but no need to check.
    __ beqz(t0, next);

    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    if (TypeEntries::is_type_none(current_klass)) {
      __ beqz(t1, none);
      __ mv(t0, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, none);
      // The checks above may fail if another thread has just
      // recorded this obj's klass in the profile.
      __ membar(MacroAssembler::LoadLoad);
      __ xorr(tmp, tmp, t1); // get back original value before XOR
      __ ld(t1, mdo_addr);
      __ xorr(tmp, tmp, t1);
      __ andi(t0, tmp, TypeEntries::type_klass_mask);
      __ beqz(t0, next);
    }
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);
  }

  // Different from before: cannot keep an accurate profile.
  __ ld(t1, mdo_addr);
  __ ori(t1, t1, TypeEntries::type_unknown);
  __ sd(t1, mdo_addr);

  if (TypeEntries::is_type_none(current_klass)) {
    __ j(next);

    __ bind(none);
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
#ifdef ASSERT
    __ andi(tmp, tmp, TypeEntries::type_mask);
    __ verify_klass_ptr(tmp);
#endif
  }
}

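// Type-profiling helper for the case where only a single klass can be seen at
// this point: install exact_klass as the profiled type if the slot is still
// empty, or set the type_unknown bit when a different type was already
// recorded.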
void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                                      Address mdo_addr, Label &next) {
  // There's a single possible klass at this profile point
  assert(exact_klass != nullptr, "should be");
  if (TypeEntries::is_type_none(current_klass)) {
    __ mov_metadata(tmp, exact_klass->constant_encoding());
    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    __ beqz(t0, next);
#ifdef ASSERT
    {
      Label ok;
      __ ld(t0, mdo_addr);
      __ beqz(t0, ok);
      __ mv(t1, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, ok);
      // may have been set by another thread
      __ membar(MacroAssembler::LoadLoad);
      __ mov_metadata(t0, exact_klass->constant_encoding());
      __ ld(t1, mdo_addr);
      __ xorr(t1, t0, t1);
      __ andi(t1, t1, TypeEntries::type_mask);
      __ beqz(t1, ok);

      __ stop("unexpected profiling mismatch");
      __ bind(ok);
    }
#endif
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
#ifdef ASSERT
    __ andi(tmp, tmp, TypeEntries::type_mask);
    __ verify_klass_ptr(tmp);
#endif
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    __ ori(tmp, tmp, TypeEntries::type_unknown);
    __ sd(tmp, mdo_addr);
  }
}

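// Branch to 'update' if the profiled value in tmp is non-null. Otherwise
// record the null_seen flag in the MDO slot (unless it is already known) and,
// if a type update would follow, skip it by jumping to 'next'.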
void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_klass,
                               Address mdo_addr, bool do_update, Label &next) {
  __ bnez(tmp, update);
  if (!TypeEntries::was_null_seen(current_klass)) {
    __ ld(t1, mdo_addr);
    __ ori(t1, t1, TypeEntries::null_seen);
    __ sd(t1, mdo_addr);
  }
  if (do_update) {
    __ j(next);
  }
}

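// Record the type of op->obj() in the MDO cell addressed by op->mdp(),
// dispatching to the null / no-conflict / conflict helpers above according
// to what is already statically known about this profile point.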
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert_different_registers(tmp, t0, t1, mdo_addr.base());

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mv(tmp, obj);
  }
  if (do_null) {
    check_null(tmp, update, current_klass, mdo_addr, do_update, next);
#ifdef ASSERT
  } else {
    __ bnez(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      check_exact_klass(tmp, exact_klass);
    }
#endif
    if (!no_conflict) {
      check_conflict(exact_klass, current_klass, tmp, next, none, mdo_addr);
    } else {
      check_no_conflict(exact_klass, current_klass, tmp, mdo_addr, next);
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

void LIR_Assembler::align_backward_branch_target() { }

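// Arithmetic negation for int, long, float and double operands.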
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fneg_s(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fneg_d(dest->as_double_reg(), left->as_double_reg());
  }
}


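// Compute the effective address of addr into dest. When the displacement does
// not fit in a 12-bit immediate and dest aliases the base register, the
// address is materialized in t0 first so the base is not clobbered.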
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  LIR_Address* adr = addr->as_address_ptr();
  Register dst = dest->as_register_lo();

  assert_different_registers(dst, t0);
  if (adr->base()->is_valid() && dst == adr->base()->as_pointer_register() && (!adr->index()->is_cpu_register())) {
    int scale = adr->scale();
    intptr_t offset = adr->disp();
    LIR_Opr index_op = adr->index();
    if (index_op->is_constant()) {
      offset += ((intptr_t)index_op->as_constant_ptr()->as_jint()) << scale;
    }

    if (!Assembler::is_simm12(offset)) {
      __ la(t0, as_Address(adr));
      __ mv(dst, t0);
      return;
    }
  }

  __ la(dst, as_Address(adr));
}


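// Call a C1 runtime routine. Targets inside the code cache are reached with a
// far_call; other targets use a patchable la/jalr sequence. Debug info is
// attached when the caller provides a CodeEmitInfo.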
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb != nullptr) {
    __ far_call(RuntimeAddress(dest));
  } else {
    RuntimeAddress target(dest);
    __ relocate(target.rspec(), [&] {
      int32_t offset;
      __ la_patchable(t0, target, offset);
      __ jalr(x1, t0, offset);
    });
  }

  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info, /* pop_fpu_stack */ false, /* wide */ false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  Label ok;
  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    LIR_Condition cond = op->condition();
    bool is_unordered = (cond != lir_cond_greaterEqual && cond != lir_cond_greater);
    emit_branch(cond, op->in_opr1(), op->in_opr2(), ok, /* is_far */ false, is_unordered);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(MacroAssembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ pause();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mv(result_reg->as_register(), xthread);
}

void LIR_Assembler::peephole(LIR_List *lir) {}

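// Atomic read-modify-write on the memory location addressed by src: lir_xadd
// performs an atomic add and lir_xchg an atomic exchange via the members
// selected by get_op(), with the value returned by the atomic operation
// placed in dest. Oop exchanges are encoded/decoded as compressed oops when
// UseCompressedOops is enabled.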
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  get_op(type);

  switch (code) {
    case lir_xadd:
      {
        RegisterOrConstant inc;
        Register tmp = as_reg(tmp_op);
        Register dst = as_reg(dest);
        if (data->is_constant()) {
          inc = RegisterOrConstant(as_long(data));
          assert_different_registers(dst, addr.base(), tmp);
          assert_different_registers(tmp, t0);
        } else {
          inc = RegisterOrConstant(as_reg(data));
          assert_different_registers(inc.as_register(), dst, addr.base(), tmp);
        }
        __ la(tmp, addr);
        (_masm->*add)(dst, inc, tmp);
        break;
      }
    case lir_xchg:
      {
        Register tmp = tmp_op->as_register();
        Register obj = as_reg(data);
        Register dst = as_reg(dest);
        if (is_oop && UseCompressedOops) {
          __ encode_heap_oop(t0, obj);
          obj = t0;
        }
        assert_different_registers(obj, addr.base(), tmp);
        assert_different_registers(dst, addr.base(), tmp);
        __ la(tmp, addr);
        (_masm->*xchg)(dst, obj, tmp);
        if (is_oop && UseCompressedOops) {
          __ decode_heap_oop(dst);
        }
      }
      break;
    default:
      ShouldNotReachHere();
  }
  __ membar(MacroAssembler::AnyAny);
}

int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Helper functions that check for constant-section overflow and set bailout
// if it occurs. They always return a valid embeddable pointer, but in the
// bailout case the pointer does not refer to unique storage.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

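// Compare-and-swap helpers for 32-bit, unsigned 32-bit and 64-bit values.
// cmpxchg leaves a boolean result in t0 which is then inverted with seqz, so
// t0 ends up 0 on success and 1 on failure.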
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::caswu(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::uint32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int64, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

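// Emit a call to the Runtime1 patching stub selected by the patching id of
// info and record the call's debug information; used by leal() when a
// patchable address is encountered.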
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = nullptr;

  switch (patching_id(info)) {
    case PatchingStub::access_field_id:
      target = Runtime1::entry_for(Runtime1::access_field_patching_id);
      break;
    case PatchingStub::load_klass_id:
      target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
      break;
    case PatchingStub::load_mirror_id:
      target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
      break;
    case PatchingStub::load_appendix_id:
      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
      break;
    default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::check_exact_klass(Register tmp, ciKlass* exact_klass) {
  Label ok;
  __ load_klass(tmp, tmp);
  __ mov_metadata(t0, exact_klass->constant_encoding());
  __ beq(tmp, t0, ok);
  __ stop("exact klass and actual klass differ");
  __ bind(ok);
}

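// Select the MacroAssembler atomic exchange / add members used by atomic_op()
// according to the operand type: 32-bit, 64-bit, or oop (narrow or wide).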
void LIR_Assembler::get_op(BasicType type) {
  switch (type) {
    case T_INT:
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
      break;
    case T_LONG:
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops) {
        xchg = &MacroAssembler::atomic_xchgalwu;
        add = &MacroAssembler::atomic_addalw;
      } else {
        xchg = &MacroAssembler::atomic_xchgal;
        add = &MacroAssembler::atomic_addal;
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

// emit_opTypeCheck sub functions
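// Type check for an array store (aastore): verify that the stored value is a
// subtype of the array's element klass, updating the cast profile when
// profiling is enabled.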
void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile) {
  Register value = op->object()->as_register();
  Register array = op->array()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();

  CodeStub* stub = op->stub();

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label profile_cast_success, profile_cast_failure, done;
  Label *success_target = should_profile ? &profile_cast_success : &done;
  Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

  if (should_profile) {
    profile_object(md, data, value, klass_RInfo, &done);
  } else {
    __ beqz(value, done);
  }

  add_debug_info_for_null_check_here(op->info_for_exception());
  __ load_klass(k_RInfo, array);
  __ load_klass(klass_RInfo, value);

  lir_store_slowcheck(k_RInfo, klass_RInfo, Rtmp1, success_target, failure_target);

  // fall through to the success case
  if (should_profile) {
    Register mdo = klass_RInfo;
    Register recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, value);
    type_profile_helper(mdo, md, data, recv, &done);
    __ j(done);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ ld(t1, counter_addr);
    __ addi(t1, t1, -DataLayout::counter_increment);
    __ sd(t1, counter_addr);
    __ j(*stub->entry());
  }

  __ bind(done);
}

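// Shared profiling tail for type checks: on the success path record the
// receiver type in the MDO, on the failure path decrement the counter before
// branching to the failure continuation.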
void LIR_Assembler::type_profile(Register obj, ciMethodData* md, Register klass_RInfo, Register k_RInfo,
                                 ciProfileData* data, Label* success, Label* failure,
                                 Label& profile_cast_success, Label& profile_cast_failure) {
  Register mdo = klass_RInfo;
  Register recv = k_RInfo;
  __ bind(profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  __ load_klass(recv, obj);
  type_profile_helper(mdo, md, data, recv, success);
  __ j(*success);

  __ bind(profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ ld(t0, counter_addr);
  __ addi(t0, t0, -DataLayout::counter_increment);
  __ sd(t0, counter_addr);
  __ j(*failure);
}

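// Subtype check used by the array-store type check: run the inline fast path
// first, then call the Runtime1 slow_subtype_check stub with the sub- and
// super-klass passed on the stack, branching to failure_target if the check
// fails.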
void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1,
                                        Label* success_target, Label* failure_target) {
  // get instance klass (it's already uncompressed)
  __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
  // perform the fast part of the checking logic
  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
  // call out-of-line instance of __ check_klass_subtype_slow_path(...)
  __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
  __ sd(k_RInfo, Address(sp, 0));             // super klass
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  // load result to k_RInfo
  __ ld(k_RInfo, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
  // result is a boolean
  __ beqz(k_RInfo, *failure_target, /* is_far */ true);
}

void LIR_Assembler::const2reg_helper(LIR_Opr src) {
  switch (src->as_constant_ptr()->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA:
      const2reg(src, FrameMap::t0_opr, lir_patch_none, nullptr);
      break;
    case T_LONG:
      const2reg(src, FrameMap::t0_long_opr, lir_patch_none, nullptr);
      break;
    case T_FLOAT:
    case T_DOUBLE:
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_reg32(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andrw(dst, left, right); break;
    case lir_logic_or:  __ orrw (dst, left, right); break;
    case lir_logic_xor: __ xorrw(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_reg(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andr(dst, left, right); break;
    case lir_logic_or:  __ orr (dst, left, right); break;
    case lir_logic_xor: __ xorr(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_imm(Register dst, Register left, int right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andi(dst, left, right); break;
    case lir_logic_or:  __ ori (dst, left, right); break;
    case lir_logic_xor: __ xori(dst, left, right); break;
    default:            ShouldNotReachHere();
  }
}

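// Store an outgoing stub argument (register or immediate) into the reserved
// argument area of the current frame, at the given word offset from sp.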
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ sd(r, Address(sp, offset_from_rsp_in_bytes));
}

void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mv(t0, c);
  __ sd(t0, Address(sp, offset_from_rsp_in_bytes));
}

#undef __