1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "code/compiledIC.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_aarch64.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "utilities/powerOfTwo.hpp"
45 #include "vmreg_aarch64.inline.hpp"
46
47
48 #ifndef PRODUCT
49 #define COMMENT(x) do { __ block_comment(x); } while (0)
50 #else
51 #define COMMENT(x)
52 #endif
53
54 NEEDS_CLEANUP // remove this definitions ?
55 const Register SYNC_header = r0; // synchronization header
56 const Register SHIFT_count = r0; // where count for shift operations must be
57
58 #define __ _masm->
59
60
61 static void select_different_registers(Register preserve,
62 Register extra,
63 Register &tmp1,
64 Register &tmp2) {
65 if (tmp1 == preserve) {
66 assert_different_registers(tmp1, tmp2, extra);
67 tmp1 = extra;
68 } else if (tmp2 == preserve) {
69 assert_different_registers(tmp1, tmp2, extra);
70 tmp2 = extra;
71 }
72 assert_different_registers(preserve, tmp1, tmp2);
73 }
74
75
76
77 static void select_different_registers(Register preserve,
78 Register extra,
79 Register &tmp1,
80 Register &tmp2,
81 Register &tmp3) {
82 if (tmp1 == preserve) {
83 assert_different_registers(tmp1, tmp2, tmp3, extra);
84 tmp1 = extra;
85 } else if (tmp2 == preserve) {
86 assert_different_registers(tmp1, tmp2, tmp3, extra);
87 tmp2 = extra;
88 } else if (tmp3 == preserve) {
89 assert_different_registers(tmp1, tmp2, tmp3, extra);
90 tmp3 = extra;
91 }
92 assert_different_registers(preserve, tmp1, tmp2, tmp3);
93 }
94
95
96 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
97
98
99 LIR_Opr LIR_Assembler::receiverOpr() {
100 return FrameMap::receiver_opr;
101 }
102
103 LIR_Opr LIR_Assembler::osrBufferPointer() {
104 return FrameMap::as_pointer_opr(receiverOpr()->as_register());
105 }
106
107 //--------------fpu register translations-----------------------
108
109
110 address LIR_Assembler::float_constant(float f) {
111 address const_addr = __ float_constant(f);
112 if (const_addr == nullptr) {
113 bailout("const section overflow");
114 return __ code()->consts()->start();
115 } else {
116 return const_addr;
117 }
118 }
119
120
121 address LIR_Assembler::double_constant(double d) {
122 address const_addr = __ double_constant(d);
123 if (const_addr == nullptr) {
124 bailout("const section overflow");
125 return __ code()->consts()->start();
126 } else {
127 return const_addr;
128 }
129 }
130
131 address LIR_Assembler::int_constant(jlong n) {
132 address const_addr = __ long_constant(n);
133 if (const_addr == nullptr) {
134 bailout("const section overflow");
135 return __ code()->consts()->start();
136 } else {
137 return const_addr;
138 }
139 }
140
141 void LIR_Assembler::breakpoint() { Unimplemented(); }
142
143 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
144
145 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
146
147 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
148 //-------------------------------------------
149
150 static Register as_reg(LIR_Opr op) {
151 return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
152 }
153
154 static jlong as_long(LIR_Opr data) {
155 jlong result;
156 switch (data->type()) {
157 case T_INT:
158 result = (data->as_jint());
159 break;
160 case T_LONG:
161 result = (data->as_jlong());
162 break;
163 default:
164 ShouldNotReachHere();
165 result = 0; // unreachable
166 }
167 return result;
168 }
169
170 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
171 Register base = addr->base()->as_pointer_register();
172 LIR_Opr opr = addr->index();
173 if (opr->is_cpu_register()) {
174 Register index;
175 if (opr->is_single_cpu())
176 index = opr->as_register();
177 else
178 index = opr->as_register_lo();
179 assert(addr->disp() == 0, "must be");
180 switch(opr->type()) {
181 case T_INT:
182 return Address(base, index, Address::sxtw(addr->scale()));
183 case T_LONG:
184 return Address(base, index, Address::lsl(addr->scale()));
185 default:
186 ShouldNotReachHere();
187 }
188 } else {
189 assert(addr->scale() == 0,
190 "expected for immediate operand, was: %d", addr->scale());
191 ptrdiff_t offset = ptrdiff_t(addr->disp());
192 // NOTE: Does not handle any 16 byte vector access.
193 const uint type_size = type2aelembytes(addr->type(), true);
194 return __ legitimize_address(Address(base, offset), type_size, tmp);
195 }
196 return Address();
197 }
198
199 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
200 ShouldNotReachHere();
201 return Address();
202 }
203
204 Address LIR_Assembler::as_Address(LIR_Address* addr) {
205 return as_Address(addr, rscratch1);
206 }
207
208 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
209 return as_Address(addr, rscratch1); // Ouch
210 // FIXME: This needs to be much more clever. See x86.
211 }
212
213 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is
214 // not encodable as a base + (immediate) offset, generate an explicit address
215 // calculation to hold the address in a temporary register.
216 Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
217 precond(size == 4 || size == 8);
218 Address addr = frame_map()->address_for_slot(index, adjust);
219 precond(addr.getMode() == Address::base_plus_offset);
220 precond(addr.base() == sp);
221 precond(addr.offset() > 0);
222 uint mask = size - 1;
223 assert((addr.offset() & mask) == 0, "scaled offsets only");
224 return __ legitimize_address(addr, size, tmp);
225 }
226
227 void LIR_Assembler::osr_entry() {
228 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
229 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
230 ValueStack* entry_state = osr_entry->state();
231 int number_of_locks = entry_state->locks_size();
232
233 // we jump here if osr happens with the interpreter
234 // state set up to continue at the beginning of the
235 // loop that triggered osr - in particular, we have
236 // the following registers setup:
237 //
238 // r2: osr buffer
239 //
240
241 // build frame
242 ciMethod* m = compilation()->method();
243 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
244
245 // OSR buffer is
246 //
247 // locals[nlocals-1..0]
248 // monitors[0..number_of_locks]
249 //
250 // locals is a direct copy of the interpreter frame so in the osr buffer
251 // so first slot in the local array is the last local from the interpreter
252 // and last slot is local[0] (receiver) from the interpreter
253 //
254 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
255 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
256 // in the interpreter frame (the method lock if a sync method)
257
258 // Initialize monitors in the compiled activation.
259 // r2: pointer to osr buffer
260 //
261 // All other registers are dead at this point and the locals will be
262 // copied into place by code emitted in the IR.
263
264 Register OSR_buf = osrBufferPointer()->as_pointer_register();
265 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
266 int monitor_offset = BytesPerWord * method()->max_locals() +
267 (2 * BytesPerWord) * (number_of_locks - 1);
268 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
269 // the OSR buffer using 2 word entries: first the lock and then
270 // the oop.
271 for (int i = 0; i < number_of_locks; i++) {
272 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
273 #ifdef ASSERT
274 // verify the interpreter's monitor has a non-null object
275 {
276 Label L;
277 __ ldr(rscratch1, __ form_address(rscratch1, OSR_buf, slot_offset + 1*BytesPerWord, 0));
278 __ cbnz(rscratch1, L);
279 __ stop("locked object is null");
280 __ bind(L);
281 }
282 #endif
283 __ ldr(r19, __ form_address(rscratch1, OSR_buf, slot_offset, 0));
284 __ ldr(r20, __ form_address(rscratch1, OSR_buf, slot_offset + BytesPerWord, 0));
285 __ str(r19, frame_map()->address_for_monitor_lock(i));
286 __ str(r20, frame_map()->address_for_monitor_object(i));
287 }
288 }
289 }
290
291
292 // inline cache check; done before the frame is built.
293 int LIR_Assembler::check_icache() {
294 return __ ic_check(CodeEntryAlignment);
295 }
296
297 void LIR_Assembler::clinit_barrier(ciMethod* method) {
298 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
299 assert(!method->holder()->is_not_initialized(), "initialization should have been started");
300
301 Label L_skip_barrier;
302
303 __ mov_metadata(rscratch2, method->holder()->constant_encoding());
304 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
305 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
306 __ bind(L_skip_barrier);
307 }
308
309 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
310 if (o == nullptr) {
311 __ mov(reg, zr);
312 } else {
313 __ movoop(reg, o);
314 }
315 }
316
317 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
318 address target = nullptr;
319 relocInfo::relocType reloc_type = relocInfo::none;
320
321 switch (patching_id(info)) {
322 case PatchingStub::access_field_id:
323 target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
324 reloc_type = relocInfo::section_word_type;
325 break;
326 case PatchingStub::load_klass_id:
327 target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
328 reloc_type = relocInfo::metadata_type;
329 break;
330 case PatchingStub::load_mirror_id:
331 target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
332 reloc_type = relocInfo::oop_type;
333 break;
334 case PatchingStub::load_appendix_id:
335 target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
336 reloc_type = relocInfo::oop_type;
337 break;
338 default: ShouldNotReachHere();
339 }
340
341 __ far_call(RuntimeAddress(target));
342 add_call_info_here(info);
343 }
344
345 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
346 deoptimize_trap(info);
347 }
348
349
350 // This specifies the rsp decrement needed to build the frame
351 int LIR_Assembler::initial_frame_size_in_bytes() const {
352 // if rounding, must let FrameMap know!
353
354 return in_bytes(frame_map()->framesize_in_bytes());
355 }
356
357
358 int LIR_Assembler::emit_exception_handler() {
359 // generate code for exception handler
360 address handler_base = __ start_a_stub(exception_handler_size());
361 if (handler_base == nullptr) {
362 // not enough space left for the handler
363 bailout("exception handler overflow");
364 return -1;
365 }
366
367 int offset = code_offset();
368
369 // the exception oop and pc are in r0, and r3
370 // no other registers need to be preserved, so invalidate them
371 __ invalidate_registers(false, true, true, false, true, true);
372
373 // check that there is really an exception
374 __ verify_not_null_oop(r0);
375
376 // search an exception handler (r0: exception oop, r3: throwing pc)
377 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
378 __ should_not_reach_here();
379 guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
380 __ end_a_stub();
381
382 return offset;
383 }
384
385
386 // Emit the code to remove the frame from the stack in the exception
387 // unwind path.
388 int LIR_Assembler::emit_unwind_handler() {
389 #ifndef PRODUCT
390 if (CommentedAssembly) {
391 _masm->block_comment("Unwind handler");
392 }
393 #endif
394
395 int offset = code_offset();
396
397 // Fetch the exception from TLS and clear out exception related thread state
398 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
399 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
400 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
401
402 __ bind(_unwind_handler_entry);
403 __ verify_not_null_oop(r0);
404 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
405 __ mov(r19, r0); // Preserve the exception
406 }
407
408 // Perform needed unlocking
409 MonitorExitStub* stub = nullptr;
410 if (method()->is_synchronized()) {
411 monitor_address(0, FrameMap::r0_opr);
412 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
413 __ unlock_object(r5, r4, r0, r6, *stub->entry());
414 __ bind(*stub->continuation());
415 }
416
417 if (compilation()->env()->dtrace_method_probes()) {
418 __ mov(c_rarg0, rthread);
419 __ mov_metadata(c_rarg1, method()->constant_encoding());
420 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
421 }
422
423 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
424 __ mov(r0, r19); // Restore the exception
425 }
426
427 // remove the activation and dispatch to the unwind handler
428 __ block_comment("remove_frame and dispatch to the unwind handler");
429 __ remove_frame(initial_frame_size_in_bytes());
430 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
431
432 // Emit the slow path assembly
433 if (stub != nullptr) {
434 stub->emit_code(this);
435 }
436
437 return offset;
438 }
439
440
441 int LIR_Assembler::emit_deopt_handler() {
442 // generate code for exception handler
443 address handler_base = __ start_a_stub(deopt_handler_size());
444 if (handler_base == nullptr) {
445 // not enough space left for the handler
446 bailout("deopt handler overflow");
447 return -1;
448 }
449
450 int offset = code_offset();
451
452 __ adr(lr, pc());
453 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
454 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
455 __ end_a_stub();
456
457 return offset;
458 }
459
460 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
461 _masm->code_section()->relocate(adr, relocInfo::poll_type);
462 int pc_offset = code_offset();
463 flush_debug_info(pc_offset);
464 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
465 if (info->exception_handlers() != nullptr) {
466 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
467 }
468 }
469
470 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
471 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
472
473 // Pop the stack before the safepoint code
474 __ remove_frame(initial_frame_size_in_bytes());
475
476 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
477 __ reserved_stack_check();
478 }
479
480 code_stub->set_safepoint_offset(__ offset());
481 __ relocate(relocInfo::poll_return_type);
482 __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
483 __ ret(lr);
484 }
485
486 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
487 guarantee(info != nullptr, "Shouldn't be null");
488 __ get_polling_page(rscratch1, relocInfo::poll_type);
489 add_debug_info_for_branch(info); // This isn't just debug info:
490 // it's the oop map
491 __ read_polling_page(rscratch1, relocInfo::poll_type);
492 return __ offset();
493 }
494
495
496 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
497 if (from_reg == r31_sp)
498 from_reg = sp;
499 if (to_reg == r31_sp)
500 to_reg = sp;
501 __ mov(to_reg, from_reg);
502 }
503
504 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
505
506
507 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
508 assert(src->is_constant(), "should not call otherwise");
509 assert(dest->is_register(), "should not call otherwise");
510 LIR_Const* c = src->as_constant_ptr();
511
512 switch (c->type()) {
513 case T_INT: {
514 assert(patch_code == lir_patch_none, "no patching handled here");
515 __ movw(dest->as_register(), c->as_jint());
516 break;
517 }
518
519 case T_ADDRESS: {
520 assert(patch_code == lir_patch_none, "no patching handled here");
521 __ mov(dest->as_register(), c->as_jint());
522 break;
523 }
524
525 case T_LONG: {
526 assert(patch_code == lir_patch_none, "no patching handled here");
527 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
528 break;
529 }
530
531 case T_OBJECT: {
532 if (patch_code == lir_patch_none) {
533 jobject2reg(c->as_jobject(), dest->as_register());
534 } else {
535 jobject2reg_with_patching(dest->as_register(), info);
536 }
537 break;
538 }
539
540 case T_METADATA: {
541 if (patch_code != lir_patch_none) {
542 klass2reg_with_patching(dest->as_register(), info);
543 } else {
544 __ mov_metadata(dest->as_register(), c->as_metadata());
545 }
546 break;
547 }
548
549 case T_FLOAT: {
550 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
551 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
552 } else {
553 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
554 __ ldrs(dest->as_float_reg(), Address(rscratch1));
555 }
556 break;
557 }
558
559 case T_DOUBLE: {
560 if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
561 __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
562 } else {
563 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
564 __ ldrd(dest->as_double_reg(), Address(rscratch1));
565 }
566 break;
567 }
568
569 default:
570 ShouldNotReachHere();
571 }
572 }
573
574 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
575 LIR_Const* c = src->as_constant_ptr();
576 switch (c->type()) {
577 case T_OBJECT:
578 {
579 if (! c->as_jobject())
580 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
581 else {
582 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
583 reg2stack(FrameMap::rscratch1_opr, dest, c->type());
584 }
585 }
586 break;
587 case T_ADDRESS:
588 {
589 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
590 reg2stack(FrameMap::rscratch1_opr, dest, c->type());
591 }
592 case T_INT:
593 case T_FLOAT:
594 {
595 Register reg = zr;
596 if (c->as_jint_bits() == 0)
597 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
598 else {
599 __ movw(rscratch1, c->as_jint_bits());
600 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
601 }
602 }
603 break;
604 case T_LONG:
605 case T_DOUBLE:
606 {
607 Register reg = zr;
608 if (c->as_jlong_bits() == 0)
609 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
610 lo_word_offset_in_bytes));
611 else {
612 __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
613 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
614 lo_word_offset_in_bytes));
615 }
616 }
617 break;
618 default:
619 ShouldNotReachHere();
620 }
621 }
622
623 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
624 assert(src->is_constant(), "should not call otherwise");
625 LIR_Const* c = src->as_constant_ptr();
626 LIR_Address* to_addr = dest->as_address_ptr();
627
628 void (Assembler::* insn)(Register Rt, const Address &adr);
629
630 switch (type) {
631 case T_ADDRESS:
632 assert(c->as_jint() == 0, "should be");
633 insn = &Assembler::str;
634 break;
635 case T_LONG:
636 assert(c->as_jlong() == 0, "should be");
637 insn = &Assembler::str;
638 break;
639 case T_INT:
640 assert(c->as_jint() == 0, "should be");
641 insn = &Assembler::strw;
642 break;
643 case T_OBJECT:
644 case T_ARRAY:
645 assert(c->as_jobject() == nullptr, "should be");
646 if (UseCompressedOops && !wide) {
647 insn = &Assembler::strw;
648 } else {
649 insn = &Assembler::str;
650 }
651 break;
652 case T_CHAR:
653 case T_SHORT:
654 assert(c->as_jint() == 0, "should be");
655 insn = &Assembler::strh;
656 break;
657 case T_BOOLEAN:
658 case T_BYTE:
659 assert(c->as_jint() == 0, "should be");
660 insn = &Assembler::strb;
661 break;
662 default:
663 ShouldNotReachHere();
664 insn = &Assembler::str; // unreachable
665 }
666
667 if (info) add_debug_info_for_null_check_here(info);
668 (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
669 }
670
671 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
672 assert(src->is_register(), "should not call otherwise");
673 assert(dest->is_register(), "should not call otherwise");
674
675 // move between cpu-registers
676 if (dest->is_single_cpu()) {
677 if (src->type() == T_LONG) {
678 // Can do LONG -> OBJECT
679 move_regs(src->as_register_lo(), dest->as_register());
680 return;
681 }
682 assert(src->is_single_cpu(), "must match");
683 if (src->type() == T_OBJECT) {
684 __ verify_oop(src->as_register());
685 }
686 move_regs(src->as_register(), dest->as_register());
687
688 } else if (dest->is_double_cpu()) {
689 if (is_reference_type(src->type())) {
690 // Surprising to me but we can see move of a long to t_object
691 __ verify_oop(src->as_register());
692 move_regs(src->as_register(), dest->as_register_lo());
693 return;
694 }
695 assert(src->is_double_cpu(), "must match");
696 Register f_lo = src->as_register_lo();
697 Register f_hi = src->as_register_hi();
698 Register t_lo = dest->as_register_lo();
699 Register t_hi = dest->as_register_hi();
700 assert(f_hi == f_lo, "must be same");
701 assert(t_hi == t_lo, "must be same");
702 move_regs(f_lo, t_lo);
703
704 } else if (dest->is_single_fpu()) {
705 __ fmovs(dest->as_float_reg(), src->as_float_reg());
706
707 } else if (dest->is_double_fpu()) {
708 __ fmovd(dest->as_double_reg(), src->as_double_reg());
709
710 } else {
711 ShouldNotReachHere();
712 }
713 }
714
715 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
716 precond(src->is_register() && dest->is_stack());
717
718 uint const c_sz32 = sizeof(uint32_t);
719 uint const c_sz64 = sizeof(uint64_t);
720
721 if (src->is_single_cpu()) {
722 int index = dest->single_stack_ix();
723 if (is_reference_type(type)) {
724 __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
725 __ verify_oop(src->as_register());
726 } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
727 __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
728 } else {
729 __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
730 }
731
732 } else if (src->is_double_cpu()) {
733 int index = dest->double_stack_ix();
734 Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
735 __ str(src->as_register_lo(), dest_addr_LO);
736
737 } else if (src->is_single_fpu()) {
738 int index = dest->single_stack_ix();
739 __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
740
741 } else if (src->is_double_fpu()) {
742 int index = dest->double_stack_ix();
743 __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
744
745 } else {
746 ShouldNotReachHere();
747 }
748 }
749
750
751 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
752 LIR_Address* to_addr = dest->as_address_ptr();
753 PatchingStub* patch = nullptr;
754 Register compressed_src = rscratch1;
755
756 if (patch_code != lir_patch_none) {
757 deoptimize_trap(info);
758 return;
759 }
760
761 if (is_reference_type(type)) {
762 __ verify_oop(src->as_register());
763
764 if (UseCompressedOops && !wide) {
765 __ encode_heap_oop(compressed_src, src->as_register());
766 } else {
767 compressed_src = src->as_register();
768 }
769 }
770
771 int null_check_here = code_offset();
772 switch (type) {
773 case T_FLOAT: {
774 __ strs(src->as_float_reg(), as_Address(to_addr));
775 break;
776 }
777
778 case T_DOUBLE: {
779 __ strd(src->as_double_reg(), as_Address(to_addr));
780 break;
781 }
782
783 case T_ARRAY: // fall through
784 case T_OBJECT: // fall through
785 if (UseCompressedOops && !wide) {
786 __ strw(compressed_src, as_Address(to_addr, rscratch2));
787 } else {
788 __ str(compressed_src, as_Address(to_addr));
789 }
790 break;
791 case T_METADATA:
792 // We get here to store a method pointer to the stack to pass to
793 // a dtrace runtime call. This can't work on 64 bit with
794 // compressed klass ptrs: T_METADATA can be a compressed klass
795 // ptr or a 64 bit method pointer.
796 ShouldNotReachHere();
797 __ str(src->as_register(), as_Address(to_addr));
798 break;
799 case T_ADDRESS:
800 __ str(src->as_register(), as_Address(to_addr));
801 break;
802 case T_INT:
803 __ strw(src->as_register(), as_Address(to_addr));
804 break;
805
806 case T_LONG: {
807 __ str(src->as_register_lo(), as_Address_lo(to_addr));
808 break;
809 }
810
811 case T_BYTE: // fall through
812 case T_BOOLEAN: {
813 __ strb(src->as_register(), as_Address(to_addr));
814 break;
815 }
816
817 case T_CHAR: // fall through
818 case T_SHORT:
819 __ strh(src->as_register(), as_Address(to_addr));
820 break;
821
822 default:
823 ShouldNotReachHere();
824 }
825 if (info != nullptr) {
826 add_debug_info_for_null_check(null_check_here, info);
827 }
828 }
829
830
831 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
832 precond(src->is_stack() && dest->is_register());
833
834 uint const c_sz32 = sizeof(uint32_t);
835 uint const c_sz64 = sizeof(uint64_t);
836
837 if (dest->is_single_cpu()) {
838 int index = src->single_stack_ix();
839 if (is_reference_type(type)) {
840 __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
841 __ verify_oop(dest->as_register());
842 } else if (type == T_METADATA || type == T_ADDRESS) {
843 __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
844 } else {
845 __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
846 }
847
848 } else if (dest->is_double_cpu()) {
849 int index = src->double_stack_ix();
850 Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
851 __ ldr(dest->as_register_lo(), src_addr_LO);
852
853 } else if (dest->is_single_fpu()) {
854 int index = src->single_stack_ix();
855 __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
856
857 } else if (dest->is_double_fpu()) {
858 int index = src->double_stack_ix();
859 __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
860
861 } else {
862 ShouldNotReachHere();
863 }
864 }
865
866
867 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
868 address target = nullptr;
869 relocInfo::relocType reloc_type = relocInfo::none;
870
871 switch (patching_id(info)) {
872 case PatchingStub::access_field_id:
873 target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
874 reloc_type = relocInfo::section_word_type;
875 break;
876 case PatchingStub::load_klass_id:
877 target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
878 reloc_type = relocInfo::metadata_type;
879 break;
880 case PatchingStub::load_mirror_id:
881 target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
882 reloc_type = relocInfo::oop_type;
883 break;
884 case PatchingStub::load_appendix_id:
885 target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
886 reloc_type = relocInfo::oop_type;
887 break;
888 default: ShouldNotReachHere();
889 }
890
891 __ far_call(RuntimeAddress(target));
892 add_call_info_here(info);
893 }
894
895 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
896
897 LIR_Opr temp;
898 if (type == T_LONG || type == T_DOUBLE)
899 temp = FrameMap::rscratch1_long_opr;
900 else
901 temp = FrameMap::rscratch1_opr;
902
903 stack2reg(src, temp, src->type());
904 reg2stack(temp, dest, dest->type());
905 }
906
907
908 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
909 LIR_Address* addr = src->as_address_ptr();
910 LIR_Address* from_addr = src->as_address_ptr();
911
912 if (addr->base()->type() == T_OBJECT) {
913 __ verify_oop(addr->base()->as_pointer_register());
914 }
915
916 if (patch_code != lir_patch_none) {
917 deoptimize_trap(info);
918 return;
919 }
920
921 if (info != nullptr) {
922 add_debug_info_for_null_check_here(info);
923 }
924 int null_check_here = code_offset();
925 switch (type) {
926 case T_FLOAT: {
927 __ ldrs(dest->as_float_reg(), as_Address(from_addr));
928 break;
929 }
930
931 case T_DOUBLE: {
932 __ ldrd(dest->as_double_reg(), as_Address(from_addr));
933 break;
934 }
935
936 case T_ARRAY: // fall through
937 case T_OBJECT: // fall through
938 if (UseCompressedOops && !wide) {
939 __ ldrw(dest->as_register(), as_Address(from_addr));
940 } else {
941 __ ldr(dest->as_register(), as_Address(from_addr));
942 }
943 break;
944 case T_METADATA:
945 // We get here to store a method pointer to the stack to pass to
946 // a dtrace runtime call. This can't work on 64 bit with
947 // compressed klass ptrs: T_METADATA can be a compressed klass
948 // ptr or a 64 bit method pointer.
949 ShouldNotReachHere();
950 __ ldr(dest->as_register(), as_Address(from_addr));
951 break;
952 case T_ADDRESS:
953 __ ldr(dest->as_register(), as_Address(from_addr));
954 break;
955 case T_INT:
956 __ ldrw(dest->as_register(), as_Address(from_addr));
957 break;
958
959 case T_LONG: {
960 __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
961 break;
962 }
963
964 case T_BYTE:
965 __ ldrsb(dest->as_register(), as_Address(from_addr));
966 break;
967 case T_BOOLEAN: {
968 __ ldrb(dest->as_register(), as_Address(from_addr));
969 break;
970 }
971
972 case T_CHAR:
973 __ ldrh(dest->as_register(), as_Address(from_addr));
974 break;
975 case T_SHORT:
976 __ ldrsh(dest->as_register(), as_Address(from_addr));
977 break;
978
979 default:
980 ShouldNotReachHere();
981 }
982
983 if (is_reference_type(type)) {
984 if (UseCompressedOops && !wide) {
985 __ decode_heap_oop(dest->as_register());
986 }
987
988 __ verify_oop(dest->as_register());
989 }
990 }
991
992
993 int LIR_Assembler::array_element_size(BasicType type) const {
994 int elem_size = type2aelembytes(type);
995 return exact_log2(elem_size);
996 }
997
998
999 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1000 switch (op->code()) {
1001 case lir_idiv:
1002 case lir_irem:
1003 arithmetic_idiv(op->code(),
1004 op->in_opr1(),
1005 op->in_opr2(),
1006 op->in_opr3(),
1007 op->result_opr(),
1008 op->info());
1009 break;
1010 case lir_fmad:
1011 __ fmaddd(op->result_opr()->as_double_reg(),
1012 op->in_opr1()->as_double_reg(),
1013 op->in_opr2()->as_double_reg(),
1014 op->in_opr3()->as_double_reg());
1015 break;
1016 case lir_fmaf:
1017 __ fmadds(op->result_opr()->as_float_reg(),
1018 op->in_opr1()->as_float_reg(),
1019 op->in_opr2()->as_float_reg(),
1020 op->in_opr3()->as_float_reg());
1021 break;
1022 default: ShouldNotReachHere(); break;
1023 }
1024 }
1025
1026 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1027 #ifdef ASSERT
1028 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
1029 if (op->block() != nullptr) _branch_target_blocks.append(op->block());
1030 if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
1031 #endif
1032
1033 if (op->cond() == lir_cond_always) {
1034 if (op->info() != nullptr) add_debug_info_for_branch(op->info());
1035 __ b(*(op->label()));
1036 } else {
1037 Assembler::Condition acond;
1038 if (op->code() == lir_cond_float_branch) {
1039 bool is_unordered = (op->ublock() == op->block());
1040 // Assembler::EQ does not permit unordered branches, so we add
1041 // another branch here. Likewise, Assembler::NE does not permit
1042 // ordered branches.
1043 if ((is_unordered && op->cond() == lir_cond_equal)
1044 || (!is_unordered && op->cond() == lir_cond_notEqual))
1045 __ br(Assembler::VS, *(op->ublock()->label()));
1046 switch(op->cond()) {
1047 case lir_cond_equal: acond = Assembler::EQ; break;
1048 case lir_cond_notEqual: acond = Assembler::NE; break;
1049 case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
1050 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
1051 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
1052 case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
1053 default: ShouldNotReachHere();
1054 acond = Assembler::EQ; // unreachable
1055 }
1056 } else {
1057 switch (op->cond()) {
1058 case lir_cond_equal: acond = Assembler::EQ; break;
1059 case lir_cond_notEqual: acond = Assembler::NE; break;
1060 case lir_cond_less: acond = Assembler::LT; break;
1061 case lir_cond_lessEqual: acond = Assembler::LE; break;
1062 case lir_cond_greaterEqual: acond = Assembler::GE; break;
1063 case lir_cond_greater: acond = Assembler::GT; break;
1064 case lir_cond_belowEqual: acond = Assembler::LS; break;
1065 case lir_cond_aboveEqual: acond = Assembler::HS; break;
1066 default: ShouldNotReachHere();
1067 acond = Assembler::EQ; // unreachable
1068 }
1069 }
1070 __ br(acond,*(op->label()));
1071 }
1072 }
1073
1074
1075
1076 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1077 LIR_Opr src = op->in_opr();
1078 LIR_Opr dest = op->result_opr();
1079
1080 switch (op->bytecode()) {
1081 case Bytecodes::_i2f:
1082 {
1083 __ scvtfws(dest->as_float_reg(), src->as_register());
1084 break;
1085 }
1086 case Bytecodes::_i2d:
1087 {
1088 __ scvtfwd(dest->as_double_reg(), src->as_register());
1089 break;
1090 }
1091 case Bytecodes::_l2d:
1092 {
1093 __ scvtfd(dest->as_double_reg(), src->as_register_lo());
1094 break;
1095 }
1096 case Bytecodes::_l2f:
1097 {
1098 __ scvtfs(dest->as_float_reg(), src->as_register_lo());
1099 break;
1100 }
1101 case Bytecodes::_f2d:
1102 {
1103 __ fcvts(dest->as_double_reg(), src->as_float_reg());
1104 break;
1105 }
1106 case Bytecodes::_d2f:
1107 {
1108 __ fcvtd(dest->as_float_reg(), src->as_double_reg());
1109 break;
1110 }
1111 case Bytecodes::_i2c:
1112 {
1113 __ ubfx(dest->as_register(), src->as_register(), 0, 16);
1114 break;
1115 }
1116 case Bytecodes::_i2l:
1117 {
1118 __ sxtw(dest->as_register_lo(), src->as_register());
1119 break;
1120 }
1121 case Bytecodes::_i2s:
1122 {
1123 __ sxth(dest->as_register(), src->as_register());
1124 break;
1125 }
1126 case Bytecodes::_i2b:
1127 {
1128 __ sxtb(dest->as_register(), src->as_register());
1129 break;
1130 }
1131 case Bytecodes::_l2i:
1132 {
1133 _masm->block_comment("FIXME: This could be a no-op");
1134 __ uxtw(dest->as_register(), src->as_register_lo());
1135 break;
1136 }
1137 case Bytecodes::_d2l:
1138 {
1139 __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
1140 break;
1141 }
1142 case Bytecodes::_f2i:
1143 {
1144 __ fcvtzsw(dest->as_register(), src->as_float_reg());
1145 break;
1146 }
1147 case Bytecodes::_f2l:
1148 {
1149 __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
1150 break;
1151 }
1152 case Bytecodes::_d2i:
1153 {
1154 __ fcvtzdw(dest->as_register(), src->as_double_reg());
1155 break;
1156 }
1157 default: ShouldNotReachHere();
1158 }
1159 }
1160
1161 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1162 if (op->init_check()) {
1163 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1164 __ ldarb(rscratch1, rscratch1);
1165 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1166 add_debug_info_for_null_check_here(op->stub()->info());
1167 __ br(Assembler::NE, *op->stub()->entry());
1168 }
1169 __ allocate_object(op->obj()->as_register(),
1170 op->tmp1()->as_register(),
1171 op->tmp2()->as_register(),
1172 op->header_size(),
1173 op->object_size(),
1174 op->klass()->as_register(),
1175 *op->stub()->entry());
1176 __ bind(*op->stub()->continuation());
1177 }
1178
1179 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1180 Register len = op->len()->as_register();
1181 __ uxtw(len, len);
1182
1183 if (UseSlowPath ||
1184 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1185 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1186 __ b(*op->stub()->entry());
1187 } else {
1188 Register tmp1 = op->tmp1()->as_register();
1189 Register tmp2 = op->tmp2()->as_register();
1190 Register tmp3 = op->tmp3()->as_register();
1191 if (len == tmp1) {
1192 tmp1 = tmp3;
1193 } else if (len == tmp2) {
1194 tmp2 = tmp3;
1195 } else if (len == tmp3) {
1196 // everything is ok
1197 } else {
1198 __ mov(tmp3, len);
1199 }
1200 __ allocate_array(op->obj()->as_register(),
1201 len,
1202 tmp1,
1203 tmp2,
1204 arrayOopDesc::base_offset_in_bytes(op->type()),
1205 array_element_size(op->type()),
1206 op->klass()->as_register(),
1207 *op->stub()->entry(),
1208 op->zero_array());
1209 }
1210 __ bind(*op->stub()->continuation());
1211 }
1212
1213 void LIR_Assembler::type_profile_helper(Register mdo,
1214 ciMethodData *md, ciProfileData *data,
1215 Register recv, Label* update_done) {
1216
1217 // Given a profile data offset, generate an Address which points to
1218 // the corresponding slot in mdo->data().
1219 // Clobbers rscratch2.
1220 auto slot_at = [=](ByteSize offset) -> Address {
1221 return __ form_address(rscratch2, mdo,
1222 md->byte_offset_of_slot(data, offset),
1223 LogBytesPerWord);
1224 };
1225
1226 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1227 Label next_test;
1228 // See if the receiver is receiver[n].
1229 __ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
1230 __ cmp(recv, rscratch1);
1231 __ br(Assembler::NE, next_test);
1232 __ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
1233 DataLayout::counter_increment);
1234 __ b(*update_done);
1235 __ bind(next_test);
1236 }
1237
1238 // Didn't find receiver; find next empty slot and fill it in
1239 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1240 Label next_test;
1241 Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
1242 __ ldr(rscratch1, recv_addr);
1243 __ cbnz(rscratch1, next_test);
1244 __ str(recv, recv_addr);
1245 __ mov(rscratch1, DataLayout::counter_increment);
1246 __ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
1247 __ b(*update_done);
1248 __ bind(next_test);
1249 }
1250 }
1251
1252 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1253 // we always need a stub for the failure case.
1254 CodeStub* stub = op->stub();
1255 Register obj = op->object()->as_register();
1256 Register k_RInfo = op->tmp1()->as_register();
1257 Register klass_RInfo = op->tmp2()->as_register();
1258 Register dst = op->result_opr()->as_register();
1259 ciKlass* k = op->klass();
1260 Register Rtmp1 = noreg;
1261
1262 // check if it needs to be profiled
1263 ciMethodData* md;
1264 ciProfileData* data;
1265
1266 const bool should_profile = op->should_profile();
1267
1268 if (should_profile) {
1269 ciMethod* method = op->profiled_method();
1270 assert(method != nullptr, "Should have method");
1271 int bci = op->profiled_bci();
1272 md = method->method_data_or_null();
1273 assert(md != nullptr, "Sanity");
1274 data = md->bci_to_data(bci);
1275 assert(data != nullptr, "need data for type check");
1276 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1277 }
1278 Label* success_target = success;
1279 Label* failure_target = failure;
1280
1281 if (obj == k_RInfo) {
1282 k_RInfo = dst;
1283 } else if (obj == klass_RInfo) {
1284 klass_RInfo = dst;
1285 }
1286 if (k->is_loaded() && !UseCompressedClassPointers) {
1287 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1288 } else {
1289 Rtmp1 = op->tmp3()->as_register();
1290 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1291 }
1292
1293 assert_different_registers(obj, k_RInfo, klass_RInfo);
1294
1295 if (should_profile) {
1296 Register mdo = klass_RInfo;
1297 __ mov_metadata(mdo, md->constant_encoding());
1298 Label not_null;
1299 __ cbnz(obj, not_null);
1300 // Object is null; update MDO and exit
1301 Address data_addr
1302 = __ form_address(rscratch2, mdo,
1303 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1304 0);
1305 __ ldrb(rscratch1, data_addr);
1306 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1307 __ strb(rscratch1, data_addr);
1308 __ b(*obj_is_null);
1309 __ bind(not_null);
1310
1311 Label update_done;
1312 Register recv = k_RInfo;
1313 __ load_klass(recv, obj);
1314 type_profile_helper(mdo, md, data, recv, &update_done);
1315 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1316 __ addptr(counter_addr, DataLayout::counter_increment);
1317
1318 __ bind(update_done);
1319 } else {
1320 __ cbz(obj, *obj_is_null);
1321 }
1322
1323 if (!k->is_loaded()) {
1324 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1325 } else {
1326 __ mov_metadata(k_RInfo, k->constant_encoding());
1327 }
1328 __ verify_oop(obj);
1329
1330 if (op->fast_check()) {
1331 // get object class
1332 // not a safepoint as obj null check happens earlier
1333 __ load_klass(rscratch1, obj);
1334 __ cmp( rscratch1, k_RInfo);
1335
1336 __ br(Assembler::NE, *failure_target);
1337 // successful cast, fall through to profile or jump
1338 } else {
1339 // get object class
1340 // not a safepoint as obj null check happens earlier
1341 __ load_klass(klass_RInfo, obj);
1342 if (k->is_loaded()) {
1343 // See if we get an immediate positive hit
1344 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1345 __ cmp(k_RInfo, rscratch1);
1346 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1347 __ br(Assembler::NE, *failure_target);
1348 // successful cast, fall through to profile or jump
1349 } else {
1350 // See if we get an immediate positive hit
1351 __ br(Assembler::EQ, *success_target);
1352 // check for self
1353 __ cmp(klass_RInfo, k_RInfo);
1354 __ br(Assembler::EQ, *success_target);
1355
1356 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1357 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1358 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1359 // result is a boolean
1360 __ cbzw(klass_RInfo, *failure_target);
1361 // successful cast, fall through to profile or jump
1362 }
1363 } else {
1364 // perform the fast part of the checking logic
1365 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1366 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1367 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1368 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1369 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1370 // result is a boolean
1371 __ cbz(k_RInfo, *failure_target);
1372 // successful cast, fall through to profile or jump
1373 }
1374 }
1375 __ b(*success);
1376 }
1377
1378
1379 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1380 const bool should_profile = op->should_profile();
1381
1382 LIR_Code code = op->code();
1383 if (code == lir_store_check) {
1384 Register value = op->object()->as_register();
1385 Register array = op->array()->as_register();
1386 Register k_RInfo = op->tmp1()->as_register();
1387 Register klass_RInfo = op->tmp2()->as_register();
1388 Register Rtmp1 = op->tmp3()->as_register();
1389
1390 CodeStub* stub = op->stub();
1391
1392 // check if it needs to be profiled
1393 ciMethodData* md;
1394 ciProfileData* data;
1395
1396 if (should_profile) {
1397 ciMethod* method = op->profiled_method();
1398 assert(method != nullptr, "Should have method");
1399 int bci = op->profiled_bci();
1400 md = method->method_data_or_null();
1401 assert(md != nullptr, "Sanity");
1402 data = md->bci_to_data(bci);
1403 assert(data != nullptr, "need data for type check");
1404 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1405 }
1406 Label done;
1407 Label* success_target = &done;
1408 Label* failure_target = stub->entry();
1409
1410 if (should_profile) {
1411 Label not_null;
1412 Register mdo = klass_RInfo;
1413 __ mov_metadata(mdo, md->constant_encoding());
1414 __ cbnz(value, not_null);
1415 // Object is null; update MDO and exit
1416 Address data_addr
1417 = __ form_address(rscratch2, mdo,
1418 md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
1419 __ ldrb(rscratch1, data_addr);
1420 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1421 __ strb(rscratch1, data_addr);
1422 __ b(done);
1423 __ bind(not_null);
1424
1425 Label update_done;
1426 Register recv = k_RInfo;
1427 __ load_klass(recv, value);
1428 type_profile_helper(mdo, md, data, recv, &update_done);
1429 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1430 __ addptr(counter_addr, DataLayout::counter_increment);
1431 __ bind(update_done);
1432 } else {
1433 __ cbz(value, done);
1434 }
1435
1436 add_debug_info_for_null_check_here(op->info_for_exception());
1437 __ load_klass(k_RInfo, array);
1438 __ load_klass(klass_RInfo, value);
1439
1440 // get instance klass (it's already uncompressed)
1441 __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1442 // perform the fast part of the checking logic
1443 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1444 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1445 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1446 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1447 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1448 // result is a boolean
1449 __ cbzw(k_RInfo, *failure_target);
1450 // fall through to the success case
1451
1452 __ bind(done);
1453 } else if (code == lir_checkcast) {
1454 Register obj = op->object()->as_register();
1455 Register dst = op->result_opr()->as_register();
1456 Label success;
1457 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1458 __ bind(success);
1459 if (dst != obj) {
1460 __ mov(dst, obj);
1461 }
1462 } else if (code == lir_instanceof) {
1463 Register obj = op->object()->as_register();
1464 Register dst = op->result_opr()->as_register();
1465 Label success, failure, done;
1466 emit_typecheck_helper(op, &success, &failure, &failure);
1467 __ bind(failure);
1468 __ mov(dst, zr);
1469 __ b(done);
1470 __ bind(success);
1471 __ mov(dst, 1);
1472 __ bind(done);
1473 } else {
1474 ShouldNotReachHere();
1475 }
1476 }
1477
1478 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1479 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1480 __ cset(rscratch1, Assembler::NE);
1481 __ membar(__ AnyAny);
1482 }
1483
1484 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1485 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1486 __ cset(rscratch1, Assembler::NE);
1487 __ membar(__ AnyAny);
1488 }
1489
1490
1491 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1492 Register addr;
1493 if (op->addr()->is_register()) {
1494 addr = as_reg(op->addr());
1495 } else {
1496 assert(op->addr()->is_address(), "what else?");
1497 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1498 assert(addr_ptr->disp() == 0, "need 0 disp");
1499 assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1500 addr = as_reg(addr_ptr->base());
1501 }
1502 Register newval = as_reg(op->new_value());
1503 Register cmpval = as_reg(op->cmp_value());
1504
1505 if (op->code() == lir_cas_obj) {
1506 if (UseCompressedOops) {
1507 Register t1 = op->tmp1()->as_register();
1508 assert(op->tmp1()->is_valid(), "must be");
1509 __ encode_heap_oop(t1, cmpval);
1510 cmpval = t1;
1511 __ encode_heap_oop(rscratch2, newval);
1512 newval = rscratch2;
1513 casw(addr, newval, cmpval);
1514 } else {
1515 casl(addr, newval, cmpval);
1516 }
1517 } else if (op->code() == lir_cas_int) {
1518 casw(addr, newval, cmpval);
1519 } else {
1520 casl(addr, newval, cmpval);
1521 }
1522 }
1523
1524
1525 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1526 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1527 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on aarch64");
1528
1529 Assembler::Condition acond, ncond;
1530 switch (condition) {
1531 case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;
1532 case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;
1533 case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;
1534 case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
1535 case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1536 case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
1537 case lir_cond_belowEqual:
1538 case lir_cond_aboveEqual:
1539 default: ShouldNotReachHere();
1540 acond = Assembler::EQ; ncond = Assembler::NE; // unreachable
1541 }
1542
1543 assert(result->is_single_cpu() || result->is_double_cpu(),
1544 "expect single register for result");
1545 if (opr1->is_constant() && opr2->is_constant()
1546 && opr1->type() == T_INT && opr2->type() == T_INT) {
1547 jint val1 = opr1->as_jint();
1548 jint val2 = opr2->as_jint();
1549 if (val1 == 0 && val2 == 1) {
1550 __ cset(result->as_register(), ncond);
1551 return;
1552 } else if (val1 == 1 && val2 == 0) {
1553 __ cset(result->as_register(), acond);
1554 return;
1555 }
1556 }
1557
1558 if (opr1->is_constant() && opr2->is_constant()
1559 && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1560 jlong val1 = opr1->as_jlong();
1561 jlong val2 = opr2->as_jlong();
1562 if (val1 == 0 && val2 == 1) {
1563 __ cset(result->as_register_lo(), ncond);
1564 return;
1565 } else if (val1 == 1 && val2 == 0) {
1566 __ cset(result->as_register_lo(), acond);
1567 return;
1568 }
1569 }
1570
1571 if (opr1->is_stack()) {
1572 stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1573 opr1 = FrameMap::rscratch1_opr;
1574 } else if (opr1->is_constant()) {
1575 LIR_Opr tmp
1576 = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1577 const2reg(opr1, tmp, lir_patch_none, nullptr);
1578 opr1 = tmp;
1579 }
1580
1581 if (opr2->is_stack()) {
1582 stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1583 opr2 = FrameMap::rscratch2_opr;
1584 } else if (opr2->is_constant()) {
1585 LIR_Opr tmp
1586 = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1587 const2reg(opr2, tmp, lir_patch_none, nullptr);
1588 opr2 = tmp;
1589 }
1590
1591 if (result->type() == T_LONG)
1592 __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1593 else
1594 __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1595 }
1596
1597 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1598 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1599
1600 if (left->is_single_cpu()) {
1601 Register lreg = left->as_register();
1602 Register dreg = as_reg(dest);
1603
1604 if (right->is_single_cpu()) {
1605 // cpu register - cpu register
1606
1607 assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1608 "should be");
1609 Register rreg = right->as_register();
1610 switch (code) {
1611 case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1612 case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1613 case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1614 default: ShouldNotReachHere();
1615 }
1616
1617 } else if (right->is_double_cpu()) {
1618 Register rreg = right->as_register_lo();
1619 // single_cpu + double_cpu: can happen with obj+long
1620 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1621 switch (code) {
1622 case lir_add: __ add(dreg, lreg, rreg); break;
1623 case lir_sub: __ sub(dreg, lreg, rreg); break;
1624 default: ShouldNotReachHere();
1625 }
1626 } else if (right->is_constant()) {
1627 // cpu register - constant
1628 jlong c;
1629
1630 // FIXME. This is fugly: we really need to factor all this logic.
1631 switch(right->type()) {
1632 case T_LONG:
1633 c = right->as_constant_ptr()->as_jlong();
1634 break;
1635 case T_INT:
1636 case T_ADDRESS:
1637 c = right->as_constant_ptr()->as_jint();
1638 break;
1639 default:
1640 ShouldNotReachHere();
1641 c = 0; // unreachable
1642 break;
1643 }
1644
1645 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1646 if (c == 0 && dreg == lreg) {
1647 COMMENT("effective nop elided");
1648 return;
1649 }
1650 switch(left->type()) {
1651 case T_INT:
1652 switch (code) {
1653 case lir_add: __ addw(dreg, lreg, c); break;
1654 case lir_sub: __ subw(dreg, lreg, c); break;
1655 default: ShouldNotReachHere();
1656 }
1657 break;
1658 case T_OBJECT:
1659 case T_ADDRESS:
1660 switch (code) {
1661 case lir_add: __ add(dreg, lreg, c); break;
1662 case lir_sub: __ sub(dreg, lreg, c); break;
1663 default: ShouldNotReachHere();
1664 }
1665 break;
1666 default:
1667 ShouldNotReachHere();
1668 }
1669 } else {
1670 ShouldNotReachHere();
1671 }
1672
1673 } else if (left->is_double_cpu()) {
1674 Register lreg_lo = left->as_register_lo();
1675
1676 if (right->is_double_cpu()) {
1677 // cpu register - cpu register
1678 Register rreg_lo = right->as_register_lo();
1679 switch (code) {
1680 case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1681 case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1682 case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1683 case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1684 case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1685 default:
1686 ShouldNotReachHere();
1687 }
1688
1689 } else if (right->is_constant()) {
1690 jlong c = right->as_constant_ptr()->as_jlong();
1691 Register dreg = as_reg(dest);
1692 switch (code) {
1693 case lir_add:
1694 case lir_sub:
1695 if (c == 0 && dreg == lreg_lo) {
1696 COMMENT("effective nop elided");
1697 return;
1698 }
1699 code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1700 break;
1701 case lir_div:
1702 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1703 if (c == 1) {
1704 // move lreg_lo to dreg if divisor is 1
1705 __ mov(dreg, lreg_lo);
1706 } else {
1707 unsigned int shift = log2i_exact(c);
1708 // use rscratch1 as intermediate result register
1709 __ asr(rscratch1, lreg_lo, 63);
1710 __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1711 __ asr(dreg, rscratch1, shift);
1712 }
1713 break;
1714 case lir_rem:
1715 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1716 if (c == 1) {
1717 // move 0 to dreg if divisor is 1
1718 __ mov(dreg, zr);
1719 } else {
1720 // use rscratch1 as intermediate result register
1721 __ negs(rscratch1, lreg_lo);
1722 __ andr(dreg, lreg_lo, c - 1);
1723 __ andr(rscratch1, rscratch1, c - 1);
1724 __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1725 }
1726 break;
1727 default:
1728 ShouldNotReachHere();
1729 }
1730 } else {
1731 ShouldNotReachHere();
1732 }
1733 } else if (left->is_single_fpu()) {
1734 assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1735 switch (code) {
1736 case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1737 case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1738 case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1739 case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1740 default:
1741 ShouldNotReachHere();
1742 }
1743 } else if (left->is_double_fpu()) {
1744 if (right->is_double_fpu()) {
1745 // fpu register - fpu register
1746 switch (code) {
1747 case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1748 case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1749 case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1750 case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1751 default:
1752 ShouldNotReachHere();
1753 }
1754 } else {
1755 if (right->is_constant()) {
1756 ShouldNotReachHere();
1757 }
1758 ShouldNotReachHere();
1759 }
1760 } else if (left->is_single_stack() || left->is_address()) {
1761 assert(left == dest, "left and dest must be equal");
1762 ShouldNotReachHere();
1763 } else {
1764 ShouldNotReachHere();
1765 }
1766 }
1767
1768 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1769 switch(code) {
1770 case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1771 case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1772 case lir_f2hf: __ flt_to_flt16(dest->as_register(), value->as_float_reg(), tmp->as_float_reg()); break;
1773 case lir_hf2f: __ flt16_to_flt(dest->as_float_reg(), value->as_register(), tmp->as_float_reg()); break;
1774 default : ShouldNotReachHere();
1775 }
1776 }
1777
1778 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1779
1780 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1781 Register Rleft = left->is_single_cpu() ? left->as_register() :
1782 left->as_register_lo();
1783 if (dst->is_single_cpu()) {
1784 Register Rdst = dst->as_register();
1785 if (right->is_constant()) {
1786 switch (code) {
1787 case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1788 case lir_logic_or: __ orrw (Rdst, Rleft, right->as_jint()); break;
1789 case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1790 default: ShouldNotReachHere(); break;
1791 }
1792 } else {
1793 Register Rright = right->is_single_cpu() ? right->as_register() :
1794 right->as_register_lo();
1795 switch (code) {
1796 case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1797 case lir_logic_or: __ orrw (Rdst, Rleft, Rright); break;
1798 case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1799 default: ShouldNotReachHere(); break;
1800 }
1801 }
1802 } else {
1803 Register Rdst = dst->as_register_lo();
1804 if (right->is_constant()) {
1805 switch (code) {
1806 case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1807 case lir_logic_or: __ orr (Rdst, Rleft, right->as_jlong()); break;
1808 case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1809 default: ShouldNotReachHere(); break;
1810 }
1811 } else {
1812 Register Rright = right->is_single_cpu() ? right->as_register() :
1813 right->as_register_lo();
1814 switch (code) {
1815 case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1816 case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;
1817 case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1818 default: ShouldNotReachHere(); break;
1819 }
1820 }
1821 }
1822 }
1823
1824
1825
1826 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1827
1828 // opcode check
1829 assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1830 bool is_irem = (code == lir_irem);
1831
1832 // operand check
1833 assert(left->is_single_cpu(), "left must be register");
1834 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
1835 assert(result->is_single_cpu(), "result must be register");
1836 Register lreg = left->as_register();
1837 Register dreg = result->as_register();
1838
1839 // power-of-2 constant check and codegen
1840 if (right->is_constant()) {
1841 int c = right->as_constant_ptr()->as_jint();
1842 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1843 if (is_irem) {
1844 if (c == 1) {
1845 // move 0 to dreg if divisor is 1
1846 __ movw(dreg, zr);
1847 } else {
1848 // use rscratch1 as intermediate result register
1849 __ negsw(rscratch1, lreg);
1850 __ andw(dreg, lreg, c - 1);
1851 __ andw(rscratch1, rscratch1, c - 1);
1852 __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1853 }
1854 } else {
1855 if (c == 1) {
1856 // move lreg to dreg if divisor is 1
1857 __ movw(dreg, lreg);
1858 } else {
1859 unsigned int shift = exact_log2(c);
1860 // use rscratch1 as intermediate result register
1861 __ asrw(rscratch1, lreg, 31);
1862 __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1863 __ asrw(dreg, rscratch1, shift);
1864 }
1865 }
1866 } else {
1867 Register rreg = right->as_register();
1868 __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1869 }
1870 }
1871
1872
1873 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1874 if (opr1->is_constant() && opr2->is_single_cpu()) {
1875 // tableswitch
1876 Register reg = as_reg(opr2);
1877 struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1878 __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1879 } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1880 Register reg1 = as_reg(opr1);
1881 if (opr2->is_single_cpu()) {
1882 // cpu register - cpu register
1883 Register reg2 = opr2->as_register();
1884 if (is_reference_type(opr1->type())) {
1885 __ cmpoop(reg1, reg2);
1886 } else {
1887 assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1888 __ cmpw(reg1, reg2);
1889 }
1890 return;
1891 }
1892 if (opr2->is_double_cpu()) {
1893 // cpu register - cpu register
1894 Register reg2 = opr2->as_register_lo();
1895 __ cmp(reg1, reg2);
1896 return;
1897 }
1898
1899 if (opr2->is_constant()) {
1900 bool is_32bit = false; // width of register operand
1901 jlong imm;
1902
1903 switch(opr2->type()) {
1904 case T_INT:
1905 imm = opr2->as_constant_ptr()->as_jint();
1906 is_32bit = true;
1907 break;
1908 case T_LONG:
1909 imm = opr2->as_constant_ptr()->as_jlong();
1910 break;
1911 case T_ADDRESS:
1912 imm = opr2->as_constant_ptr()->as_jint();
1913 break;
1914 case T_METADATA:
1915 imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1916 break;
1917 case T_OBJECT:
1918 case T_ARRAY:
1919 jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1920 __ cmpoop(reg1, rscratch1);
1921 return;
1922 default:
1923 ShouldNotReachHere();
1924 imm = 0; // unreachable
1925 break;
1926 }
1927
1928 if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1929 if (is_32bit)
1930 __ cmpw(reg1, imm);
1931 else
1932 __ subs(zr, reg1, imm);
1933 return;
1934 } else {
1935 __ mov(rscratch1, imm);
1936 if (is_32bit)
1937 __ cmpw(reg1, rscratch1);
1938 else
1939 __ cmp(reg1, rscratch1);
1940 return;
1941 }
1942 } else
1943 ShouldNotReachHere();
1944 } else if (opr1->is_single_fpu()) {
1945 FloatRegister reg1 = opr1->as_float_reg();
1946 assert(opr2->is_single_fpu(), "expect single float register");
1947 FloatRegister reg2 = opr2->as_float_reg();
1948 __ fcmps(reg1, reg2);
1949 } else if (opr1->is_double_fpu()) {
1950 FloatRegister reg1 = opr1->as_double_reg();
1951 assert(opr2->is_double_fpu(), "expect double float register");
1952 FloatRegister reg2 = opr2->as_double_reg();
1953 __ fcmpd(reg1, reg2);
1954 } else {
1955 ShouldNotReachHere();
1956 }
1957 }
1958
1959 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1960 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1961 bool is_unordered_less = (code == lir_ucmp_fd2i);
1962 if (left->is_single_fpu()) {
1963 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1964 } else if (left->is_double_fpu()) {
1965 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1966 } else {
1967 ShouldNotReachHere();
1968 }
1969 } else if (code == lir_cmp_l2i) {
1970 Label done;
1971 __ cmp(left->as_register_lo(), right->as_register_lo());
1972 __ mov(dst->as_register(), (uint64_t)-1L);
1973 __ br(Assembler::LT, done);
1974 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1975 __ bind(done);
1976 } else {
1977 ShouldNotReachHere();
1978 }
1979 }
1980
1981
1982 void LIR_Assembler::align_call(LIR_Code code) { }
1983
1984
1985 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1986 address call = __ trampoline_call(Address(op->addr(), rtype));
1987 if (call == nullptr) {
1988 bailout("trampoline stub overflow");
1989 return;
1990 }
1991 add_call_info(code_offset(), op->info());
1992 __ post_call_nop();
1993 }
1994
1995
1996 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1997 address call = __ ic_call(op->addr());
1998 if (call == nullptr) {
1999 bailout("trampoline stub overflow");
2000 return;
2001 }
2002 add_call_info(code_offset(), op->info());
2003 __ post_call_nop();
2004 }
2005
2006 void LIR_Assembler::emit_static_call_stub() {
2007 address call_pc = __ pc();
2008 address stub = __ start_a_stub(call_stub_size());
2009 if (stub == nullptr) {
2010 bailout("static call stub overflow");
2011 return;
2012 }
2013
2014 int start = __ offset();
2015
2016 __ relocate(static_stub_Relocation::spec(call_pc));
2017 __ emit_static_call_stub();
2018
2019 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2020 <= call_stub_size(), "stub too big");
2021 __ end_a_stub();
2022 }
2023
2024
2025 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2026 assert(exceptionOop->as_register() == r0, "must match");
2027 assert(exceptionPC->as_register() == r3, "must match");
2028
2029 // exception object is not added to oop map by LinearScan
2030 // (LinearScan assumes that no oops are in fixed registers)
2031 info->add_register_oop(exceptionOop);
2032 StubId unwind_id;
2033
2034 // get current pc information
2035 // pc is only needed if the method has an exception handler, the unwind code does not need it.
2036 if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2037 // As no instructions have been generated yet for this LIR node it's
2038 // possible that an oop map already exists for the current offset.
2039 // In that case insert an dummy NOP here to ensure all oop map PCs
2040 // are unique. See JDK-8237483.
2041 __ nop();
2042 }
2043 int pc_for_athrow_offset = __ offset();
2044 InternalAddress pc_for_athrow(__ pc());
2045 __ adr(exceptionPC->as_register(), pc_for_athrow);
2046 add_call_info(pc_for_athrow_offset, info); // for exception handler
2047
2048 __ verify_not_null_oop(r0);
2049 // search an exception handler (r0: exception oop, r3: throwing pc)
2050 if (compilation()->has_fpu_code()) {
2051 unwind_id = StubId::c1_handle_exception_id;
2052 } else {
2053 unwind_id = StubId::c1_handle_exception_nofpu_id;
2054 }
2055 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2056
2057 // FIXME: enough room for two byte trap ????
2058 __ nop();
2059 }
2060
2061
2062 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2063 assert(exceptionOop->as_register() == r0, "must match");
2064
2065 __ b(_unwind_handler_entry);
2066 }
2067
2068
2069 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2070 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2071 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2072
2073 switch (left->type()) {
2074 case T_INT: {
2075 switch (code) {
2076 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;
2077 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;
2078 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2079 default:
2080 ShouldNotReachHere();
2081 break;
2082 }
2083 break;
2084 case T_LONG:
2085 case T_ADDRESS:
2086 case T_OBJECT:
2087 switch (code) {
2088 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
2089 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
2090 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2091 default:
2092 ShouldNotReachHere();
2093 break;
2094 }
2095 break;
2096 default:
2097 ShouldNotReachHere();
2098 break;
2099 }
2100 }
2101 }
2102
2103
2104 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2105 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2106 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2107
2108 switch (left->type()) {
2109 case T_INT: {
2110 switch (code) {
2111 case lir_shl: __ lslw (dreg, lreg, count); break;
2112 case lir_shr: __ asrw (dreg, lreg, count); break;
2113 case lir_ushr: __ lsrw (dreg, lreg, count); break;
2114 default:
2115 ShouldNotReachHere();
2116 break;
2117 }
2118 break;
2119 case T_LONG:
2120 case T_ADDRESS:
2121 case T_OBJECT:
2122 switch (code) {
2123 case lir_shl: __ lsl (dreg, lreg, count); break;
2124 case lir_shr: __ asr (dreg, lreg, count); break;
2125 case lir_ushr: __ lsr (dreg, lreg, count); break;
2126 default:
2127 ShouldNotReachHere();
2128 break;
2129 }
2130 break;
2131 default:
2132 ShouldNotReachHere();
2133 break;
2134 }
2135 }
2136 }
2137
2138
2139 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2140 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2141 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2142 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2143 __ str (r, Address(sp, offset_from_rsp_in_bytes));
2144 }
2145
2146
2147 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2148 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2149 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2150 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2151 __ mov (rscratch1, c);
2152 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2153 }
2154
2155
2156 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2157 ShouldNotReachHere();
2158 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2159 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2160 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2161 __ lea(rscratch1, __ constant_oop_address(o));
2162 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2163 }
2164
2165
2166 // This code replaces a call to arraycopy; no exception may
2167 // be thrown in this code, they must be thrown in the System.arraycopy
2168 // activation frame; we could save some checks if this would not be the case
2169 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2170 ciArrayKlass* default_type = op->expected_type();
2171 Register src = op->src()->as_register();
2172 Register dst = op->dst()->as_register();
2173 Register src_pos = op->src_pos()->as_register();
2174 Register dst_pos = op->dst_pos()->as_register();
2175 Register length = op->length()->as_register();
2176 Register tmp = op->tmp()->as_register();
2177
2178 CodeStub* stub = op->stub();
2179 int flags = op->flags();
2180 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2181 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2182
2183 // if we don't know anything, just go through the generic arraycopy
2184 if (default_type == nullptr // || basic_type == T_OBJECT
2185 ) {
2186 Label done;
2187 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2188
2189 // Save the arguments in case the generic arraycopy fails and we
2190 // have to fall back to the JNI stub
2191 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2192 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2193 __ str(src, Address(sp, 4*BytesPerWord));
2194
2195 address copyfunc_addr = StubRoutines::generic_arraycopy();
2196 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2197
2198 // The arguments are in java calling convention so we shift them
2199 // to C convention
2200 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2201 __ mov(c_rarg0, j_rarg0);
2202 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2203 __ mov(c_rarg1, j_rarg1);
2204 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2205 __ mov(c_rarg2, j_rarg2);
2206 assert_different_registers(c_rarg3, j_rarg4);
2207 __ mov(c_rarg3, j_rarg3);
2208 __ mov(c_rarg4, j_rarg4);
2209 #ifndef PRODUCT
2210 if (PrintC1Statistics) {
2211 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2212 }
2213 #endif
2214 __ far_call(RuntimeAddress(copyfunc_addr));
2215
2216 __ cbz(r0, *stub->continuation());
2217
2218 // Reload values from the stack so they are where the stub
2219 // expects them.
2220 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2221 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2222 __ ldr(src, Address(sp, 4*BytesPerWord));
2223
2224 // r0 is -1^K where K == partial copied count
2225 __ eonw(rscratch1, r0, zr);
2226 // adjust length down and src/end pos up by partial copied count
2227 __ subw(length, length, rscratch1);
2228 __ addw(src_pos, src_pos, rscratch1);
2229 __ addw(dst_pos, dst_pos, rscratch1);
2230 __ b(*stub->entry());
2231
2232 __ bind(*stub->continuation());
2233 return;
2234 }
2235
2236 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2237
2238 int elem_size = type2aelembytes(basic_type);
2239 int scale = exact_log2(elem_size);
2240
2241 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2242 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2243
2244 // test for null
2245 if (flags & LIR_OpArrayCopy::src_null_check) {
2246 __ cbz(src, *stub->entry());
2247 }
2248 if (flags & LIR_OpArrayCopy::dst_null_check) {
2249 __ cbz(dst, *stub->entry());
2250 }
2251
2252 // If the compiler was not able to prove that exact type of the source or the destination
2253 // of the arraycopy is an array type, check at runtime if the source or the destination is
2254 // an instance type.
2255 if (flags & LIR_OpArrayCopy::type_check) {
2256 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2257 __ load_klass(tmp, dst);
2258 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2259 __ cmpw(rscratch1, Klass::_lh_neutral_value);
2260 __ br(Assembler::GE, *stub->entry());
2261 }
2262
2263 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2264 __ load_klass(tmp, src);
2265 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2266 __ cmpw(rscratch1, Klass::_lh_neutral_value);
2267 __ br(Assembler::GE, *stub->entry());
2268 }
2269 }
2270
2271 // check if negative
2272 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2273 __ cmpw(src_pos, 0);
2274 __ br(Assembler::LT, *stub->entry());
2275 }
2276 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2277 __ cmpw(dst_pos, 0);
2278 __ br(Assembler::LT, *stub->entry());
2279 }
2280
2281 if (flags & LIR_OpArrayCopy::length_positive_check) {
2282 __ cmpw(length, 0);
2283 __ br(Assembler::LT, *stub->entry());
2284 }
2285
2286 if (flags & LIR_OpArrayCopy::src_range_check) {
2287 __ addw(tmp, src_pos, length);
2288 __ ldrw(rscratch1, src_length_addr);
2289 __ cmpw(tmp, rscratch1);
2290 __ br(Assembler::HI, *stub->entry());
2291 }
2292 if (flags & LIR_OpArrayCopy::dst_range_check) {
2293 __ addw(tmp, dst_pos, length);
2294 __ ldrw(rscratch1, dst_length_addr);
2295 __ cmpw(tmp, rscratch1);
2296 __ br(Assembler::HI, *stub->entry());
2297 }
2298
2299 if (flags & LIR_OpArrayCopy::type_check) {
2300 // We don't know the array types are compatible
2301 if (basic_type != T_OBJECT) {
2302 // Simple test for basic type arrays
2303 __ cmp_klasses_from_objects(src, dst, tmp, rscratch1);
2304 __ br(Assembler::NE, *stub->entry());
2305 } else {
2306 // For object arrays, if src is a sub class of dst then we can
2307 // safely do the copy.
2308 Label cont, slow;
2309
2310 #define PUSH(r1, r2) \
2311 stp(r1, r2, __ pre(sp, -2 * wordSize));
2312
2313 #define POP(r1, r2) \
2314 ldp(r1, r2, __ post(sp, 2 * wordSize));
2315
2316 __ PUSH(src, dst);
2317
2318 __ load_klass(src, src);
2319 __ load_klass(dst, dst);
2320
2321 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2322
2323 __ PUSH(src, dst);
2324 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2325 __ POP(src, dst);
2326
2327 __ cbnz(src, cont);
2328
2329 __ bind(slow);
2330 __ POP(src, dst);
2331
2332 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2333 if (copyfunc_addr != nullptr) { // use stub if available
2334 // src is not a sub class of dst so we have to do a
2335 // per-element check.
2336
2337 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2338 if ((flags & mask) != mask) {
2339 // Check that at least both of them object arrays.
2340 assert(flags & mask, "one of the two should be known to be an object array");
2341
2342 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2343 __ load_klass(tmp, src);
2344 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2345 __ load_klass(tmp, dst);
2346 }
2347 int lh_offset = in_bytes(Klass::layout_helper_offset());
2348 Address klass_lh_addr(tmp, lh_offset);
2349 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2350 __ ldrw(rscratch1, klass_lh_addr);
2351 __ mov(rscratch2, objArray_lh);
2352 __ eorw(rscratch1, rscratch1, rscratch2);
2353 __ cbnzw(rscratch1, *stub->entry());
2354 }
2355
2356 // Spill because stubs can use any register they like and it's
2357 // easier to restore just those that we care about.
2358 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2359 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2360 __ str(src, Address(sp, 4*BytesPerWord));
2361
2362 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2363 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2364 assert_different_registers(c_rarg0, dst, dst_pos, length);
2365 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2366 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2367 assert_different_registers(c_rarg1, dst, length);
2368 __ uxtw(c_rarg2, length);
2369 assert_different_registers(c_rarg2, dst);
2370
2371 __ load_klass(c_rarg4, dst);
2372 __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2373 __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2374 __ far_call(RuntimeAddress(copyfunc_addr));
2375
2376 #ifndef PRODUCT
2377 if (PrintC1Statistics) {
2378 Label failed;
2379 __ cbnz(r0, failed);
2380 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2381 __ bind(failed);
2382 }
2383 #endif
2384
2385 __ cbz(r0, *stub->continuation());
2386
2387 #ifndef PRODUCT
2388 if (PrintC1Statistics) {
2389 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2390 }
2391 #endif
2392 assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2393
2394 // Restore previously spilled arguments
2395 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2396 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2397 __ ldr(src, Address(sp, 4*BytesPerWord));
2398
2399 // return value is -1^K where K is partial copied count
2400 __ eonw(rscratch1, r0, zr);
2401 // adjust length down and src/end pos up by partial copied count
2402 __ subw(length, length, rscratch1);
2403 __ addw(src_pos, src_pos, rscratch1);
2404 __ addw(dst_pos, dst_pos, rscratch1);
2405 }
2406
2407 __ b(*stub->entry());
2408
2409 __ bind(cont);
2410 __ POP(src, dst);
2411 }
2412 }
2413
2414 #ifdef ASSERT
2415 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2416 // Sanity check the known type with the incoming class. For the
2417 // primitive case the types must match exactly with src.klass and
2418 // dst.klass each exactly matching the default type. For the
2419 // object array case, if no type check is needed then either the
2420 // dst type is exactly the expected type and the src type is a
2421 // subtype which we can't check or src is the same array as dst
2422 // but not necessarily exactly of type default_type.
2423 Label known_ok, halt;
2424 __ mov_metadata(tmp, default_type->constant_encoding());
2425
2426 if (basic_type != T_OBJECT) {
2427 __ cmp_klass(dst, tmp, rscratch1);
2428 __ br(Assembler::NE, halt);
2429 __ cmp_klass(src, tmp, rscratch1);
2430 __ br(Assembler::EQ, known_ok);
2431 } else {
2432 __ cmp_klass(dst, tmp, rscratch1);
2433 __ br(Assembler::EQ, known_ok);
2434 __ cmp(src, dst);
2435 __ br(Assembler::EQ, known_ok);
2436 }
2437 __ bind(halt);
2438 __ stop("incorrect type information in arraycopy");
2439 __ bind(known_ok);
2440 }
2441 #endif
2442
2443 #ifndef PRODUCT
2444 if (PrintC1Statistics) {
2445 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2446 }
2447 #endif
2448
2449 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2450 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2451 assert_different_registers(c_rarg0, dst, dst_pos, length);
2452 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2453 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2454 assert_different_registers(c_rarg1, dst, length);
2455 __ uxtw(c_rarg2, length);
2456 assert_different_registers(c_rarg2, dst);
2457
2458 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2459 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2460 const char *name;
2461 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2462
2463 CodeBlob *cb = CodeCache::find_blob(entry);
2464 if (cb) {
2465 __ far_call(RuntimeAddress(entry));
2466 } else {
2467 __ call_VM_leaf(entry, 3);
2468 }
2469
2470 if (stub != nullptr) {
2471 __ bind(*stub->continuation());
2472 }
2473 }
2474
2475
2476
2477
2478 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2479 Register obj = op->obj_opr()->as_register(); // may not be an oop
2480 Register hdr = op->hdr_opr()->as_register();
2481 Register lock = op->lock_opr()->as_register();
2482 Register temp = op->scratch_opr()->as_register();
2483 if (op->code() == lir_lock) {
2484 // add debug info for NullPointerException only if one is possible
2485 int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
2486 if (op->info() != nullptr) {
2487 add_debug_info_for_null_check(null_check_offset, op->info());
2488 }
2489 // done
2490 } else if (op->code() == lir_unlock) {
2491 __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
2492 } else {
2493 Unimplemented();
2494 }
2495 __ bind(*op->stub()->continuation());
2496 }
2497
2498 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2499 Register obj = op->obj()->as_pointer_register();
2500 Register result = op->result_opr()->as_pointer_register();
2501
2502 CodeEmitInfo* info = op->info();
2503 if (info != nullptr) {
2504 add_debug_info_for_null_check_here(info);
2505 }
2506
2507 __ load_klass(result, obj);
2508 }
2509
2510 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2511 ciMethod* method = op->profiled_method();
2512 int bci = op->profiled_bci();
2513 ciMethod* callee = op->profiled_callee();
2514
2515 // Update counter for all call types
2516 ciMethodData* md = method->method_data_or_null();
2517 assert(md != nullptr, "Sanity");
2518 ciProfileData* data = md->bci_to_data(bci);
2519 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2520 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2521 Register mdo = op->mdo()->as_register();
2522 __ mov_metadata(mdo, md->constant_encoding());
2523 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2524 // Perform additional virtual call profiling for invokevirtual and
2525 // invokeinterface bytecodes
2526 if (op->should_profile_receiver_type()) {
2527 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2528 Register recv = op->recv()->as_register();
2529 assert_different_registers(mdo, recv);
2530 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2531 ciKlass* known_klass = op->known_holder();
2532 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2533 // We know the type that will be seen at this call site; we can
2534 // statically update the MethodData* rather than needing to do
2535 // dynamic tests on the receiver type
2536
2537 // NOTE: we should probably put a lock around this search to
2538 // avoid collisions by concurrent compilations
2539 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2540 uint i;
2541 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2542 ciKlass* receiver = vc_data->receiver(i);
2543 if (known_klass->equals(receiver)) {
2544 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2545 __ addptr(data_addr, DataLayout::counter_increment);
2546 return;
2547 }
2548 }
2549
2550 // Receiver type not found in profile data; select an empty slot
2551
2552 // Note that this is less efficient than it should be because it
2553 // always does a write to the receiver part of the
2554 // VirtualCallData rather than just the first time
2555 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2556 ciKlass* receiver = vc_data->receiver(i);
2557 if (receiver == nullptr) {
2558 __ mov_metadata(rscratch1, known_klass->constant_encoding());
2559 Address recv_addr =
2560 __ form_address(rscratch2, mdo,
2561 md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
2562 LogBytesPerWord);
2563 __ str(rscratch1, recv_addr);
2564 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2565 __ addptr(data_addr, DataLayout::counter_increment);
2566 return;
2567 }
2568 }
2569 } else {
2570 __ load_klass(recv, recv);
2571 Label update_done;
2572 type_profile_helper(mdo, md, data, recv, &update_done);
2573 // Receiver did not match any saved receiver and there is no empty row for it.
2574 // Increment total counter to indicate polymorphic case.
2575 __ addptr(counter_addr, DataLayout::counter_increment);
2576
2577 __ bind(update_done);
2578 }
2579 } else {
2580 // Static call
2581 __ addptr(counter_addr, DataLayout::counter_increment);
2582 }
2583 }
2584
2585
2586 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2587 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2588 }
2589
2590 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2591 assert(op->crc()->is_single_cpu(), "crc must be register");
2592 assert(op->val()->is_single_cpu(), "byte value must be register");
2593 assert(op->result_opr()->is_single_cpu(), "result must be register");
2594 Register crc = op->crc()->as_register();
2595 Register val = op->val()->as_register();
2596 Register res = op->result_opr()->as_register();
2597
2598 assert_different_registers(val, crc, res);
2599 uint64_t offset;
2600 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2601 __ add(res, res, offset);
2602
2603 __ mvnw(crc, crc); // ~crc
2604 __ update_byte_crc32(crc, val, res);
2605 __ mvnw(res, crc); // ~crc
2606 }
2607
2608 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2609 COMMENT("emit_profile_type {");
2610 Register obj = op->obj()->as_register();
2611 Register tmp = op->tmp()->as_pointer_register();
2612 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2613 ciKlass* exact_klass = op->exact_klass();
2614 intptr_t current_klass = op->current_klass();
2615 bool not_null = op->not_null();
2616 bool no_conflict = op->no_conflict();
2617
2618 Label update, next, none;
2619
2620 bool do_null = !not_null;
2621 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2622 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2623
2624 assert(do_null || do_update, "why are we here?");
2625 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2626 assert(mdo_addr.base() != rscratch1, "wrong register");
2627
2628 __ verify_oop(obj);
2629
2630 if (tmp != obj) {
2631 assert_different_registers(obj, tmp, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2632 __ mov(tmp, obj);
2633 } else {
2634 assert_different_registers(obj, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2635 }
2636 if (do_null) {
2637 __ cbnz(tmp, update);
2638 if (!TypeEntries::was_null_seen(current_klass)) {
2639 __ ldr(rscratch2, mdo_addr);
2640 __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2641 __ str(rscratch2, mdo_addr);
2642 }
2643 if (do_update) {
2644 #ifndef ASSERT
2645 __ b(next);
2646 }
2647 #else
2648 __ b(next);
2649 }
2650 } else {
2651 __ cbnz(tmp, update);
2652 __ stop("unexpected null obj");
2653 #endif
2654 }
2655
2656 __ bind(update);
2657
2658 if (do_update) {
2659 #ifdef ASSERT
2660 if (exact_klass != nullptr) {
2661 Label ok;
2662 __ load_klass(tmp, tmp);
2663 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2664 __ eor(rscratch1, tmp, rscratch1);
2665 __ cbz(rscratch1, ok);
2666 __ stop("exact klass and actual klass differ");
2667 __ bind(ok);
2668 }
2669 #endif
2670 if (!no_conflict) {
2671 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2672 if (exact_klass != nullptr) {
2673 __ mov_metadata(tmp, exact_klass->constant_encoding());
2674 } else {
2675 __ load_klass(tmp, tmp);
2676 }
2677
2678 __ ldr(rscratch2, mdo_addr);
2679 __ eor(tmp, tmp, rscratch2);
2680 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2681 // klass seen before, nothing to do. The unknown bit may have been
2682 // set already but no need to check.
2683 __ cbz(rscratch1, next);
2684
2685 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2686
2687 if (TypeEntries::is_type_none(current_klass)) {
2688 __ cbz(rscratch2, none);
2689 __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2690 __ br(Assembler::EQ, none);
2691 // There is a chance that the checks above
2692 // fail if another thread has just set the
2693 // profiling to this obj's klass
2694 __ dmb(Assembler::ISHLD);
2695 __ eor(tmp, tmp, rscratch2); // get back original value before XOR
2696 __ ldr(rscratch2, mdo_addr);
2697 __ eor(tmp, tmp, rscratch2);
2698 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2699 __ cbz(rscratch1, next);
2700 }
2701 } else {
2702 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2703 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2704
2705 __ ldr(tmp, mdo_addr);
2706 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2707 }
2708
2709 // different than before. Cannot keep accurate profile.
2710 __ ldr(rscratch2, mdo_addr);
2711 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2712 __ str(rscratch2, mdo_addr);
2713
2714 if (TypeEntries::is_type_none(current_klass)) {
2715 __ b(next);
2716
2717 __ bind(none);
2718 // first time here. Set profile type.
2719 __ str(tmp, mdo_addr);
2720 #ifdef ASSERT
2721 __ andr(tmp, tmp, TypeEntries::type_mask);
2722 __ verify_klass_ptr(tmp);
2723 #endif
2724 }
2725 } else {
2726 // There's a single possible klass at this profile point
2727 assert(exact_klass != nullptr, "should be");
2728 if (TypeEntries::is_type_none(current_klass)) {
2729 __ mov_metadata(tmp, exact_klass->constant_encoding());
2730 __ ldr(rscratch2, mdo_addr);
2731 __ eor(tmp, tmp, rscratch2);
2732 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2733 __ cbz(rscratch1, next);
2734 #ifdef ASSERT
2735 {
2736 Label ok;
2737 __ ldr(rscratch1, mdo_addr);
2738 __ cbz(rscratch1, ok);
2739 __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2740 __ br(Assembler::EQ, ok);
2741 // may have been set by another thread
2742 __ dmb(Assembler::ISHLD);
2743 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2744 __ ldr(rscratch2, mdo_addr);
2745 __ eor(rscratch2, rscratch1, rscratch2);
2746 __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2747 __ cbz(rscratch2, ok);
2748
2749 __ stop("unexpected profiling mismatch");
2750 __ bind(ok);
2751 }
2752 #endif
2753 // first time here. Set profile type.
2754 __ str(tmp, mdo_addr);
2755 #ifdef ASSERT
2756 __ andr(tmp, tmp, TypeEntries::type_mask);
2757 __ verify_klass_ptr(tmp);
2758 #endif
2759 } else {
2760 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2761 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2762
2763 __ ldr(tmp, mdo_addr);
2764 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2765
2766 __ orr(tmp, tmp, TypeEntries::type_unknown);
2767 __ str(tmp, mdo_addr);
2768 // FIXME: Write barrier needed here?
2769 }
2770 }
2771
2772 __ bind(next);
2773 }
2774 COMMENT("} emit_profile_type");
2775 }
2776
2777
2778 void LIR_Assembler::align_backward_branch_target() {
2779 }
2780
2781
2782 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2783 // tmp must be unused
2784 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2785
2786 if (left->is_single_cpu()) {
2787 assert(dest->is_single_cpu(), "expect single result reg");
2788 __ negw(dest->as_register(), left->as_register());
2789 } else if (left->is_double_cpu()) {
2790 assert(dest->is_double_cpu(), "expect double result reg");
2791 __ neg(dest->as_register_lo(), left->as_register_lo());
2792 } else if (left->is_single_fpu()) {
2793 assert(dest->is_single_fpu(), "expect single float result reg");
2794 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2795 } else {
2796 assert(left->is_double_fpu(), "expect double float operand reg");
2797 assert(dest->is_double_fpu(), "expect double float result reg");
2798 __ fnegd(dest->as_double_reg(), left->as_double_reg());
2799 }
2800 }
2801
2802
2803 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2804 if (patch_code != lir_patch_none) {
2805 deoptimize_trap(info);
2806 return;
2807 }
2808
2809 __ lea(dest->as_pointer_register(), as_Address(addr->as_address_ptr()));
2810 }
2811
2812
2813 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2814 assert(!tmp->is_valid(), "don't need temporary");
2815
2816 CodeBlob *cb = CodeCache::find_blob(dest);
2817 if (cb) {
2818 __ far_call(RuntimeAddress(dest));
2819 } else {
2820 __ mov(rscratch1, RuntimeAddress(dest));
2821 __ blr(rscratch1);
2822 }
2823
2824 if (info != nullptr) {
2825 add_call_info_here(info);
2826 }
2827 __ post_call_nop();
2828 }
2829
2830 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2831 if (dest->is_address() || src->is_address()) {
2832 move_op(src, dest, type, lir_patch_none, info, /*wide*/false);
2833 } else {
2834 ShouldNotReachHere();
2835 }
2836 }
2837
2838 #ifdef ASSERT
2839 // emit run-time assertion
2840 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2841 assert(op->code() == lir_assert, "must be");
2842
2843 if (op->in_opr1()->is_valid()) {
2844 assert(op->in_opr2()->is_valid(), "both operands must be valid");
2845 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2846 } else {
2847 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2848 assert(op->condition() == lir_cond_always, "no other conditions allowed");
2849 }
2850
2851 Label ok;
2852 if (op->condition() != lir_cond_always) {
2853 Assembler::Condition acond = Assembler::AL;
2854 switch (op->condition()) {
2855 case lir_cond_equal: acond = Assembler::EQ; break;
2856 case lir_cond_notEqual: acond = Assembler::NE; break;
2857 case lir_cond_less: acond = Assembler::LT; break;
2858 case lir_cond_lessEqual: acond = Assembler::LE; break;
2859 case lir_cond_greaterEqual: acond = Assembler::GE; break;
2860 case lir_cond_greater: acond = Assembler::GT; break;
2861 case lir_cond_belowEqual: acond = Assembler::LS; break;
2862 case lir_cond_aboveEqual: acond = Assembler::HS; break;
2863 default: ShouldNotReachHere();
2864 }
2865 __ br(acond, ok);
2866 }
2867 if (op->halt()) {
2868 const char* str = __ code_string(op->msg());
2869 __ stop(str);
2870 } else {
2871 breakpoint();
2872 }
2873 __ bind(ok);
2874 }
2875 #endif
2876
2877 #ifndef PRODUCT
2878 #define COMMENT(x) do { __ block_comment(x); } while (0)
2879 #else
2880 #define COMMENT(x)
2881 #endif
2882
2883 void LIR_Assembler::membar() {
2884 COMMENT("membar");
2885 __ membar(MacroAssembler::AnyAny);
2886 }
2887
2888 void LIR_Assembler::membar_acquire() {
2889 __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2890 }
2891
2892 void LIR_Assembler::membar_release() {
2893 __ membar(Assembler::LoadStore|Assembler::StoreStore);
2894 }
2895
2896 void LIR_Assembler::membar_loadload() {
2897 __ membar(Assembler::LoadLoad);
2898 }
2899
2900 void LIR_Assembler::membar_storestore() {
2901 __ membar(MacroAssembler::StoreStore);
2902 }
2903
2904 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2905
2906 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2907
2908 void LIR_Assembler::on_spin_wait() {
2909 __ spin_wait();
2910 }
2911
2912 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2913 __ mov(result_reg->as_register(), rthread);
2914 }
2915
2916
2917 void LIR_Assembler::peephole(LIR_List *lir) {
2918 #if 0
2919 if (tableswitch_count >= max_tableswitches)
2920 return;
2921
2922 /*
2923 This finite-state automaton recognizes sequences of compare-and-
2924 branch instructions. We will turn them into a tableswitch. You
2925 could argue that C1 really shouldn't be doing this sort of
2926 optimization, but without it the code is really horrible.
2927 */
2928
2929 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2930 int first_key, last_key = -2147483648;
2931 int next_key = 0;
2932 int start_insn = -1;
2933 int last_insn = -1;
2934 Register reg = noreg;
2935 LIR_Opr reg_opr;
2936 state = start_s;
2937
2938 LIR_OpList* inst = lir->instructions_list();
2939 for (int i = 0; i < inst->length(); i++) {
2940 LIR_Op* op = inst->at(i);
2941 switch (state) {
2942 case start_s:
2943 first_key = -1;
2944 start_insn = i;
2945 switch (op->code()) {
2946 case lir_cmp:
2947 LIR_Opr opr1 = op->as_Op2()->in_opr1();
2948 LIR_Opr opr2 = op->as_Op2()->in_opr2();
2949 if (opr1->is_cpu_register() && opr1->is_single_cpu()
2950 && opr2->is_constant()
2951 && opr2->type() == T_INT) {
2952 reg_opr = opr1;
2953 reg = opr1->as_register();
2954 first_key = opr2->as_constant_ptr()->as_jint();
2955 next_key = first_key + 1;
2956 state = cmp_s;
2957 goto next_state;
2958 }
2959 break;
2960 }
2961 break;
2962 case cmp_s:
2963 switch (op->code()) {
2964 case lir_branch:
2965 if (op->as_OpBranch()->cond() == lir_cond_equal) {
2966 state = beq_s;
2967 last_insn = i;
2968 goto next_state;
2969 }
2970 }
2971 state = start_s;
2972 break;
2973 case beq_s:
2974 switch (op->code()) {
2975 case lir_cmp: {
2976 LIR_Opr opr1 = op->as_Op2()->in_opr1();
2977 LIR_Opr opr2 = op->as_Op2()->in_opr2();
2978 if (opr1->is_cpu_register() && opr1->is_single_cpu()
2979 && opr1->as_register() == reg
2980 && opr2->is_constant()
2981 && opr2->type() == T_INT
2982 && opr2->as_constant_ptr()->as_jint() == next_key) {
2983 last_key = next_key;
2984 next_key++;
2985 state = cmp_s;
2986 goto next_state;
2987 }
2988 }
2989 }
2990 last_key = next_key;
2991 state = start_s;
2992 break;
2993 default:
2994 assert(false, "impossible state");
2995 }
2996 if (state == start_s) {
2997 if (first_key < last_key - 5L && reg != noreg) {
2998 {
2999 // printf("found run register %d starting at insn %d low value %d high value %d\n",
3000 // reg->encoding(),
3001 // start_insn, first_key, last_key);
3002 // for (int i = 0; i < inst->length(); i++) {
3003 // inst->at(i)->print();
3004 // tty->print("\n");
3005 // }
3006 // tty->print("\n");
3007 }
3008
3009 struct tableswitch *sw = &switches[tableswitch_count];
3010 sw->_insn_index = start_insn, sw->_first_key = first_key,
3011 sw->_last_key = last_key, sw->_reg = reg;
3012 inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3013 {
3014 // Insert the new table of branches
3015 int offset = last_insn;
3016 for (int n = first_key; n < last_key; n++) {
3017 inst->insert_before
3018 (last_insn + 1,
3019 new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3020 inst->at(offset)->as_OpBranch()->label()));
3021 offset -= 2, i++;
3022 }
3023 }
3024 // Delete all the old compare-and-branch instructions
3025 for (int n = first_key; n < last_key; n++) {
3026 inst->remove_at(start_insn);
3027 inst->remove_at(start_insn);
3028 }
3029 // Insert the tableswitch instruction
3030 inst->insert_before(start_insn,
3031 new LIR_Op2(lir_cmp, lir_cond_always,
3032 LIR_OprFact::intConst(tableswitch_count),
3033 reg_opr));
3034 inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3035 tableswitch_count++;
3036 }
3037 reg = noreg;
3038 last_key = -2147483648;
3039 }
3040 next_state:
3041 ;
3042 }
3043 #endif
3044 }
3045
3046 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3047 Address addr = as_Address(src->as_address_ptr());
3048 BasicType type = src->type();
3049 bool is_oop = is_reference_type(type);
3050
3051 void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3052 void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3053
3054 switch(type) {
3055 case T_INT:
3056 xchg = &MacroAssembler::atomic_xchgalw;
3057 add = &MacroAssembler::atomic_addalw;
3058 break;
3059 case T_LONG:
3060 xchg = &MacroAssembler::atomic_xchgal;
3061 add = &MacroAssembler::atomic_addal;
3062 break;
3063 case T_OBJECT:
3064 case T_ARRAY:
3065 if (UseCompressedOops) {
3066 xchg = &MacroAssembler::atomic_xchgalw;
3067 add = &MacroAssembler::atomic_addalw;
3068 } else {
3069 xchg = &MacroAssembler::atomic_xchgal;
3070 add = &MacroAssembler::atomic_addal;
3071 }
3072 break;
3073 default:
3074 ShouldNotReachHere();
3075 xchg = &MacroAssembler::atomic_xchgal;
3076 add = &MacroAssembler::atomic_addal; // unreachable
3077 }
3078
3079 switch (code) {
3080 case lir_xadd:
3081 {
3082 RegisterOrConstant inc;
3083 Register tmp = as_reg(tmp_op);
3084 Register dst = as_reg(dest);
3085 if (data->is_constant()) {
3086 inc = RegisterOrConstant(as_long(data));
3087 assert_different_registers(dst, addr.base(), tmp,
3088 rscratch1, rscratch2);
3089 } else {
3090 inc = RegisterOrConstant(as_reg(data));
3091 assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3092 rscratch1, rscratch2);
3093 }
3094 __ lea(tmp, addr);
3095 (_masm->*add)(dst, inc, tmp);
3096 break;
3097 }
3098 case lir_xchg:
3099 {
3100 Register tmp = tmp_op->as_register();
3101 Register obj = as_reg(data);
3102 Register dst = as_reg(dest);
3103 if (is_oop && UseCompressedOops) {
3104 __ encode_heap_oop(rscratch2, obj);
3105 obj = rscratch2;
3106 }
3107 assert_different_registers(obj, addr.base(), tmp, rscratch1);
3108 assert_different_registers(dst, addr.base(), tmp, rscratch1);
3109 __ lea(tmp, addr);
3110 (_masm->*xchg)(dst, obj, tmp);
3111 if (is_oop && UseCompressedOops) {
3112 __ decode_heap_oop(dst);
3113 }
3114 }
3115 break;
3116 default:
3117 ShouldNotReachHere();
3118 }
3119 if(!UseLSE) {
3120 __ membar(__ AnyAny);
3121 }
3122 }
3123
3124 #undef __