1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "ci/ciUtilities.hpp"
37 #include "code/aotCodeCache.hpp"
38 #include "code/compiledIC.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gc_globals.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/threadIdentifier.hpp"
47 #include "utilities/powerOfTwo.hpp"
48 #include "vmreg_aarch64.inline.hpp"
49
50
51 #ifndef PRODUCT
52 #define COMMENT(x) do { __ block_comment(x); } while (0)
53 #else
54 #define COMMENT(x)
55 #endif
56
57 NEEDS_CLEANUP // remove this definitions ?
58 const Register SYNC_header = r0; // synchronization header
59 const Register SHIFT_count = r0; // where count for shift operations must be
60
61 #define __ _masm->
62
63
64 static void select_different_registers(Register preserve,
65 Register extra,
66 Register &tmp1,
67 Register &tmp2) {
68 if (tmp1 == preserve) {
69 assert_different_registers(tmp1, tmp2, extra);
70 tmp1 = extra;
71 } else if (tmp2 == preserve) {
72 assert_different_registers(tmp1, tmp2, extra);
73 tmp2 = extra;
74 }
75 assert_different_registers(preserve, tmp1, tmp2);
76 }
77
78
79
80 static void select_different_registers(Register preserve,
81 Register extra,
82 Register &tmp1,
83 Register &tmp2,
84 Register &tmp3) {
85 if (tmp1 == preserve) {
86 assert_different_registers(tmp1, tmp2, tmp3, extra);
87 tmp1 = extra;
88 } else if (tmp2 == preserve) {
89 assert_different_registers(tmp1, tmp2, tmp3, extra);
90 tmp2 = extra;
91 } else if (tmp3 == preserve) {
92 assert_different_registers(tmp1, tmp2, tmp3, extra);
93 tmp3 = extra;
94 }
95 assert_different_registers(preserve, tmp1, tmp2, tmp3);
96 }
97
98
99 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
100
101
102 LIR_Opr LIR_Assembler::receiverOpr() {
103 return FrameMap::receiver_opr;
104 }
105
106 LIR_Opr LIR_Assembler::osrBufferPointer() {
107 return FrameMap::as_pointer_opr(receiverOpr()->as_register());
108 }
109
110 //--------------fpu register translations-----------------------
111
112
113 address LIR_Assembler::float_constant(float f) {
114 address const_addr = __ float_constant(f);
115 if (const_addr == nullptr) {
116 bailout("const section overflow");
117 return __ code()->consts()->start();
118 } else {
119 return const_addr;
120 }
121 }
122
123
124 address LIR_Assembler::double_constant(double d) {
125 address const_addr = __ double_constant(d);
126 if (const_addr == nullptr) {
127 bailout("const section overflow");
128 return __ code()->consts()->start();
129 } else {
130 return const_addr;
131 }
132 }
133
134 address LIR_Assembler::int_constant(jlong n) {
135 address const_addr = __ long_constant(n);
136 if (const_addr == nullptr) {
137 bailout("const section overflow");
138 return __ code()->consts()->start();
139 } else {
140 return const_addr;
141 }
142 }
143
144 void LIR_Assembler::breakpoint() { Unimplemented(); }
145
146 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
147
148 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
149
150 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
151 //-------------------------------------------
152
153 static Register as_reg(LIR_Opr op) {
154 return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
155 }
156
157 static jlong as_long(LIR_Opr data) {
158 jlong result;
159 switch (data->type()) {
160 case T_INT:
161 result = (data->as_jint());
162 break;
163 case T_LONG:
164 result = (data->as_jlong());
165 break;
166 default:
167 ShouldNotReachHere();
168 result = 0; // unreachable
169 }
170 return result;
171 }
172
173 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
174 Register base = addr->base()->as_pointer_register();
175 LIR_Opr opr = addr->index();
176 if (opr->is_cpu_register()) {
177 Register index;
178 if (opr->is_single_cpu())
179 index = opr->as_register();
180 else
181 index = opr->as_register_lo();
182 assert(addr->disp() == 0, "must be");
183 switch(opr->type()) {
184 case T_INT:
185 return Address(base, index, Address::sxtw(addr->scale()));
186 case T_LONG:
187 return Address(base, index, Address::lsl(addr->scale()));
188 default:
189 ShouldNotReachHere();
190 }
191 } else {
192 assert(addr->scale() == 0,
193 "expected for immediate operand, was: %d", addr->scale());
194 ptrdiff_t offset = ptrdiff_t(addr->disp());
195 // NOTE: Does not handle any 16 byte vector access.
196 const uint type_size = type2aelembytes(addr->type(), true);
197 return __ legitimize_address(Address(base, offset), type_size, tmp);
198 }
199 return Address();
200 }
201
202 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
203 ShouldNotReachHere();
204 return Address();
205 }
206
207 Address LIR_Assembler::as_Address(LIR_Address* addr) {
208 return as_Address(addr, rscratch1);
209 }
210
211 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
212 return as_Address(addr, rscratch1); // Ouch
213 // FIXME: This needs to be much more clever. See x86.
214 }
215
216 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is
217 // not encodable as a base + (immediate) offset, generate an explicit address
218 // calculation to hold the address in a temporary register.
219 Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
220 precond(size == 4 || size == 8);
221 Address addr = frame_map()->address_for_slot(index, adjust);
222 precond(addr.getMode() == Address::base_plus_offset);
223 precond(addr.base() == sp);
224 precond(addr.offset() > 0);
225 uint mask = size - 1;
226 assert((addr.offset() & mask) == 0, "scaled offsets only");
227 return __ legitimize_address(addr, size, tmp);
228 }
229
230 void LIR_Assembler::osr_entry() {
231 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
232 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
233 ValueStack* entry_state = osr_entry->state();
234 int number_of_locks = entry_state->locks_size();
235
236 // we jump here if osr happens with the interpreter
237 // state set up to continue at the beginning of the
238 // loop that triggered osr - in particular, we have
239 // the following registers setup:
240 //
241 // r2: osr buffer
242 //
243
244 // build frame
245 ciMethod* m = compilation()->method();
246 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
247
248 // OSR buffer is
249 //
250 // locals[nlocals-1..0]
251 // monitors[0..number_of_locks]
252 //
253 // locals is a direct copy of the interpreter frame so in the osr buffer
254 // so first slot in the local array is the last local from the interpreter
255 // and last slot is local[0] (receiver) from the interpreter
256 //
257 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
258 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
259 // in the interpreter frame (the method lock if a sync method)
260
261 // Initialize monitors in the compiled activation.
262 // r2: pointer to osr buffer
263 //
264 // All other registers are dead at this point and the locals will be
265 // copied into place by code emitted in the IR.
266
267 Register OSR_buf = osrBufferPointer()->as_pointer_register();
268 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
269 int monitor_offset = BytesPerWord * method()->max_locals() +
270 (2 * BytesPerWord) * (number_of_locks - 1);
271 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
272 // the OSR buffer using 2 word entries: first the lock and then
273 // the oop.
274 for (int i = 0; i < number_of_locks; i++) {
275 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
276 #ifdef ASSERT
277 // verify the interpreter's monitor has a non-null object
278 {
279 Label L;
280 __ ldr(rscratch1, __ form_address(rscratch1, OSR_buf, slot_offset + 1*BytesPerWord, 0));
281 __ cbnz(rscratch1, L);
282 __ stop("locked object is null");
283 __ bind(L);
284 }
285 #endif
286 __ ldr(r19, __ form_address(rscratch1, OSR_buf, slot_offset, 0));
287 __ ldr(r20, __ form_address(rscratch1, OSR_buf, slot_offset + BytesPerWord, 0));
288 __ str(r19, frame_map()->address_for_monitor_lock(i));
289 __ str(r20, frame_map()->address_for_monitor_object(i));
290 }
291 }
292 }
293
294
295 // inline cache check; done before the frame is built.
296 int LIR_Assembler::check_icache() {
297 return __ ic_check(CodeEntryAlignment);
298 }
299
300 void LIR_Assembler::clinit_barrier(ciMethod* method) {
301 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
302 assert(!method->holder()->is_not_initialized(), "initialization should have been started");
303
304 Label L_skip_barrier;
305
306 __ mov_metadata(rscratch2, method->holder()->constant_encoding());
307 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
308 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
309 __ bind(L_skip_barrier);
310 }
311
312 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
313 if (o == nullptr) {
314 __ mov(reg, zr);
315 } else {
316 __ movoop(reg, o);
317 }
318 }
319
320 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
321 address target = nullptr;
322 relocInfo::relocType reloc_type = relocInfo::none;
323
324 switch (patching_id(info)) {
325 case PatchingStub::access_field_id:
326 target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
327 reloc_type = relocInfo::section_word_type;
328 break;
329 case PatchingStub::load_klass_id:
330 target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
331 reloc_type = relocInfo::metadata_type;
332 break;
333 case PatchingStub::load_mirror_id:
334 target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
335 reloc_type = relocInfo::oop_type;
336 break;
337 case PatchingStub::load_appendix_id:
338 target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
339 reloc_type = relocInfo::oop_type;
340 break;
341 default: ShouldNotReachHere();
342 }
343
344 __ far_call(RuntimeAddress(target));
345 add_call_info_here(info);
346 }
347
348 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
349 deoptimize_trap(info);
350 }
351
352
353 // This specifies the rsp decrement needed to build the frame
354 int LIR_Assembler::initial_frame_size_in_bytes() const {
355 // if rounding, must let FrameMap know!
356
357 return in_bytes(frame_map()->framesize_in_bytes());
358 }
359
360
361 int LIR_Assembler::emit_exception_handler() {
362 // generate code for exception handler
363 address handler_base = __ start_a_stub(exception_handler_size());
364 if (handler_base == nullptr) {
365 // not enough space left for the handler
366 bailout("exception handler overflow");
367 return -1;
368 }
369
370 int offset = code_offset();
371
372 // the exception oop and pc are in r0, and r3
373 // no other registers need to be preserved, so invalidate them
374 __ invalidate_registers(false, true, true, false, true, true);
375
376 // check that there is really an exception
377 __ verify_not_null_oop(r0);
378
379 // search an exception handler (r0: exception oop, r3: throwing pc)
380 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
381 __ should_not_reach_here();
382 guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
383 __ end_a_stub();
384
385 return offset;
386 }
387
388
389 // Emit the code to remove the frame from the stack in the exception
390 // unwind path.
391 int LIR_Assembler::emit_unwind_handler() {
392 #ifndef PRODUCT
393 if (CommentedAssembly) {
394 _masm->block_comment("Unwind handler");
395 }
396 #endif
397
398 int offset = code_offset();
399
400 // Fetch the exception from TLS and clear out exception related thread state
401 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
402 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
403 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
404
405 __ bind(_unwind_handler_entry);
406 __ verify_not_null_oop(r0);
407 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
408 __ mov(r19, r0); // Preserve the exception
409 }
410
411 // Perform needed unlocking
412 MonitorExitStub* stub = nullptr;
413 if (method()->is_synchronized()) {
414 monitor_address(0, FrameMap::r0_opr);
415 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
416 __ unlock_object(r5, r4, r0, r6, *stub->entry());
417 __ bind(*stub->continuation());
418 }
419
420 if (compilation()->env()->dtrace_method_probes()) {
421 __ mov(c_rarg0, rthread);
422 __ mov_metadata(c_rarg1, method()->constant_encoding());
423 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
424 }
425
426 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
427 __ mov(r0, r19); // Restore the exception
428 }
429
430 // remove the activation and dispatch to the unwind handler
431 __ block_comment("remove_frame and dispatch to the unwind handler");
432 __ remove_frame(initial_frame_size_in_bytes());
433 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
434
435 // Emit the slow path assembly
436 if (stub != nullptr) {
437 stub->emit_code(this);
438 }
439
440 return offset;
441 }
442
443
444 int LIR_Assembler::emit_deopt_handler() {
445 // generate code for exception handler
446 address handler_base = __ start_a_stub(deopt_handler_size());
447 if (handler_base == nullptr) {
448 // not enough space left for the handler
449 bailout("deopt handler overflow");
450 return -1;
451 }
452
453 int offset = code_offset();
454
455 __ adr(lr, pc());
456 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
457 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
458 __ end_a_stub();
459
460 return offset;
461 }
462
463 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
464 _masm->code_section()->relocate(adr, relocInfo::poll_type);
465 int pc_offset = code_offset();
466 flush_debug_info(pc_offset);
467 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
468 if (info->exception_handlers() != nullptr) {
469 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
470 }
471 }
472
473 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
474 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
475
476 // Pop the stack before the safepoint code
477 __ remove_frame(initial_frame_size_in_bytes());
478
479 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
480 __ reserved_stack_check();
481 }
482
483 code_stub->set_safepoint_offset(__ offset());
484 __ relocate(relocInfo::poll_return_type);
485 __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
486 __ ret(lr);
487 }
488
489 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
490 guarantee(info != nullptr, "Shouldn't be null");
491 __ get_polling_page(rscratch1, relocInfo::poll_type);
492 add_debug_info_for_branch(info); // This isn't just debug info:
493 // it's the oop map
494 __ read_polling_page(rscratch1, relocInfo::poll_type);
495 return __ offset();
496 }
497
498
499 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
500 if (from_reg == r31_sp)
501 from_reg = sp;
502 if (to_reg == r31_sp)
503 to_reg = sp;
504 __ mov(to_reg, from_reg);
505 }
506
507 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
508
509
510 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
511 assert(src->is_constant(), "should not call otherwise");
512 assert(dest->is_register(), "should not call otherwise");
513 LIR_Const* c = src->as_constant_ptr();
514
515 switch (c->type()) {
516 case T_INT: {
517 assert(patch_code == lir_patch_none, "no patching handled here");
518 __ movw(dest->as_register(), c->as_jint());
519 break;
520 }
521
522 case T_ADDRESS: {
523 assert(patch_code == lir_patch_none, "no patching handled here");
524 __ mov(dest->as_register(), c->as_jint());
525 break;
526 }
527
528 case T_LONG: {
529 assert(patch_code == lir_patch_none, "no patching handled here");
530 #if INCLUDE_CDS
531 if (AOTCodeCache::is_on_for_dump()) {
532 address b = c->as_pointer();
533 if (b == (address)ThreadIdentifier::unsafe_offset()) {
534 __ lea(dest->as_register_lo(), ExternalAddress(b));
535 break;
536 }
537 if (AOTRuntimeConstants::contains(b)) {
538 __ load_aotrc_address(dest->as_register_lo(), b);
539 break;
540 }
541 }
542 #endif
543 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
544 break;
545 }
546
547 case T_OBJECT: {
548 if (patch_code == lir_patch_none) {
549 jobject2reg(c->as_jobject(), dest->as_register());
550 } else {
551 jobject2reg_with_patching(dest->as_register(), info);
552 }
553 break;
554 }
555
556 case T_METADATA: {
557 if (patch_code != lir_patch_none) {
558 klass2reg_with_patching(dest->as_register(), info);
559 } else {
560 __ mov_metadata(dest->as_register(), c->as_metadata());
561 }
562 break;
563 }
564
565 case T_FLOAT: {
566 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
567 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
568 } else {
569 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
570 __ ldrs(dest->as_float_reg(), Address(rscratch1));
571 }
572 break;
573 }
574
575 case T_DOUBLE: {
576 if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
577 __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
578 } else {
579 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
580 __ ldrd(dest->as_double_reg(), Address(rscratch1));
581 }
582 break;
583 }
584
585 default:
586 ShouldNotReachHere();
587 }
588 }
589
590 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
591 LIR_Const* c = src->as_constant_ptr();
592 switch (c->type()) {
593 case T_OBJECT:
594 {
595 if (! c->as_jobject())
596 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
597 else {
598 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
599 reg2stack(FrameMap::rscratch1_opr, dest, c->type());
600 }
601 }
602 break;
603 case T_ADDRESS:
604 {
605 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
606 reg2stack(FrameMap::rscratch1_opr, dest, c->type());
607 }
608 case T_INT:
609 case T_FLOAT:
610 {
611 Register reg = zr;
612 if (c->as_jint_bits() == 0)
613 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
614 else {
615 __ movw(rscratch1, c->as_jint_bits());
616 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
617 }
618 }
619 break;
620 case T_LONG:
621 case T_DOUBLE:
622 {
623 Register reg = zr;
624 if (c->as_jlong_bits() == 0)
625 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
626 lo_word_offset_in_bytes));
627 else {
628 __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
629 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
630 lo_word_offset_in_bytes));
631 }
632 }
633 break;
634 default:
635 ShouldNotReachHere();
636 }
637 }
638
639 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
640 assert(src->is_constant(), "should not call otherwise");
641 LIR_Const* c = src->as_constant_ptr();
642 LIR_Address* to_addr = dest->as_address_ptr();
643
644 void (Assembler::* insn)(Register Rt, const Address &adr);
645
646 switch (type) {
647 case T_ADDRESS:
648 assert(c->as_jint() == 0, "should be");
649 insn = &Assembler::str;
650 break;
651 case T_LONG:
652 assert(c->as_jlong() == 0, "should be");
653 insn = &Assembler::str;
654 break;
655 case T_INT:
656 assert(c->as_jint() == 0, "should be");
657 insn = &Assembler::strw;
658 break;
659 case T_OBJECT:
660 case T_ARRAY:
661 assert(c->as_jobject() == nullptr, "should be");
662 if (UseCompressedOops && !wide) {
663 insn = &Assembler::strw;
664 } else {
665 insn = &Assembler::str;
666 }
667 break;
668 case T_CHAR:
669 case T_SHORT:
670 assert(c->as_jint() == 0, "should be");
671 insn = &Assembler::strh;
672 break;
673 case T_BOOLEAN:
674 case T_BYTE:
675 assert(c->as_jint() == 0, "should be");
676 insn = &Assembler::strb;
677 break;
678 default:
679 ShouldNotReachHere();
680 insn = &Assembler::str; // unreachable
681 }
682
683 if (info) add_debug_info_for_null_check_here(info);
684 (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
685 }
686
687 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
688 assert(src->is_register(), "should not call otherwise");
689 assert(dest->is_register(), "should not call otherwise");
690
691 // move between cpu-registers
692 if (dest->is_single_cpu()) {
693 if (src->type() == T_LONG) {
694 // Can do LONG -> OBJECT
695 move_regs(src->as_register_lo(), dest->as_register());
696 return;
697 }
698 assert(src->is_single_cpu(), "must match");
699 if (src->type() == T_OBJECT) {
700 __ verify_oop(src->as_register());
701 }
702 move_regs(src->as_register(), dest->as_register());
703
704 } else if (dest->is_double_cpu()) {
705 if (is_reference_type(src->type())) {
706 // Surprising to me but we can see move of a long to t_object
707 __ verify_oop(src->as_register());
708 move_regs(src->as_register(), dest->as_register_lo());
709 return;
710 }
711 assert(src->is_double_cpu(), "must match");
712 Register f_lo = src->as_register_lo();
713 Register f_hi = src->as_register_hi();
714 Register t_lo = dest->as_register_lo();
715 Register t_hi = dest->as_register_hi();
716 assert(f_hi == f_lo, "must be same");
717 assert(t_hi == t_lo, "must be same");
718 move_regs(f_lo, t_lo);
719
720 } else if (dest->is_single_fpu()) {
721 __ fmovs(dest->as_float_reg(), src->as_float_reg());
722
723 } else if (dest->is_double_fpu()) {
724 __ fmovd(dest->as_double_reg(), src->as_double_reg());
725
726 } else {
727 ShouldNotReachHere();
728 }
729 }
730
731 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
732 precond(src->is_register() && dest->is_stack());
733
734 uint const c_sz32 = sizeof(uint32_t);
735 uint const c_sz64 = sizeof(uint64_t);
736
737 if (src->is_single_cpu()) {
738 int index = dest->single_stack_ix();
739 if (is_reference_type(type)) {
740 __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
741 __ verify_oop(src->as_register());
742 } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
743 __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
744 } else {
745 __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
746 }
747
748 } else if (src->is_double_cpu()) {
749 int index = dest->double_stack_ix();
750 Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
751 __ str(src->as_register_lo(), dest_addr_LO);
752
753 } else if (src->is_single_fpu()) {
754 int index = dest->single_stack_ix();
755 __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
756
757 } else if (src->is_double_fpu()) {
758 int index = dest->double_stack_ix();
759 __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
760
761 } else {
762 ShouldNotReachHere();
763 }
764 }
765
766
767 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
768 LIR_Address* to_addr = dest->as_address_ptr();
769 PatchingStub* patch = nullptr;
770 Register compressed_src = rscratch1;
771
772 if (patch_code != lir_patch_none) {
773 deoptimize_trap(info);
774 return;
775 }
776
777 if (is_reference_type(type)) {
778 __ verify_oop(src->as_register());
779
780 if (UseCompressedOops && !wide) {
781 __ encode_heap_oop(compressed_src, src->as_register());
782 } else {
783 compressed_src = src->as_register();
784 }
785 }
786
787 int null_check_here = code_offset();
788 switch (type) {
789 case T_FLOAT: {
790 __ strs(src->as_float_reg(), as_Address(to_addr));
791 break;
792 }
793
794 case T_DOUBLE: {
795 __ strd(src->as_double_reg(), as_Address(to_addr));
796 break;
797 }
798
799 case T_ARRAY: // fall through
800 case T_OBJECT: // fall through
801 if (UseCompressedOops && !wide) {
802 __ strw(compressed_src, as_Address(to_addr, rscratch2));
803 } else {
804 __ str(compressed_src, as_Address(to_addr));
805 }
806 break;
807 case T_METADATA:
808 // We get here to store a method pointer to the stack to pass to
809 // a dtrace runtime call. This can't work on 64 bit with
810 // compressed klass ptrs: T_METADATA can be a compressed klass
811 // ptr or a 64 bit method pointer.
812 ShouldNotReachHere();
813 __ str(src->as_register(), as_Address(to_addr));
814 break;
815 case T_ADDRESS:
816 __ str(src->as_register(), as_Address(to_addr));
817 break;
818 case T_INT:
819 __ strw(src->as_register(), as_Address(to_addr));
820 break;
821
822 case T_LONG: {
823 __ str(src->as_register_lo(), as_Address_lo(to_addr));
824 break;
825 }
826
827 case T_BYTE: // fall through
828 case T_BOOLEAN: {
829 __ strb(src->as_register(), as_Address(to_addr));
830 break;
831 }
832
833 case T_CHAR: // fall through
834 case T_SHORT:
835 __ strh(src->as_register(), as_Address(to_addr));
836 break;
837
838 default:
839 ShouldNotReachHere();
840 }
841 if (info != nullptr) {
842 add_debug_info_for_null_check(null_check_here, info);
843 }
844 }
845
846
847 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
848 precond(src->is_stack() && dest->is_register());
849
850 uint const c_sz32 = sizeof(uint32_t);
851 uint const c_sz64 = sizeof(uint64_t);
852
853 if (dest->is_single_cpu()) {
854 int index = src->single_stack_ix();
855 if (is_reference_type(type)) {
856 __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
857 __ verify_oop(dest->as_register());
858 } else if (type == T_METADATA || type == T_ADDRESS) {
859 __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
860 } else {
861 __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
862 }
863
864 } else if (dest->is_double_cpu()) {
865 int index = src->double_stack_ix();
866 Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
867 __ ldr(dest->as_register_lo(), src_addr_LO);
868
869 } else if (dest->is_single_fpu()) {
870 int index = src->single_stack_ix();
871 __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
872
873 } else if (dest->is_double_fpu()) {
874 int index = src->double_stack_ix();
875 __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
876
877 } else {
878 ShouldNotReachHere();
879 }
880 }
881
882
883 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
884 address target = nullptr;
885 relocInfo::relocType reloc_type = relocInfo::none;
886
887 switch (patching_id(info)) {
888 case PatchingStub::access_field_id:
889 target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
890 reloc_type = relocInfo::section_word_type;
891 break;
892 case PatchingStub::load_klass_id:
893 target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
894 reloc_type = relocInfo::metadata_type;
895 break;
896 case PatchingStub::load_mirror_id:
897 target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
898 reloc_type = relocInfo::oop_type;
899 break;
900 case PatchingStub::load_appendix_id:
901 target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
902 reloc_type = relocInfo::oop_type;
903 break;
904 default: ShouldNotReachHere();
905 }
906
907 __ far_call(RuntimeAddress(target));
908 add_call_info_here(info);
909 }
910
911 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
912
913 LIR_Opr temp;
914 if (type == T_LONG || type == T_DOUBLE)
915 temp = FrameMap::rscratch1_long_opr;
916 else
917 temp = FrameMap::rscratch1_opr;
918
919 stack2reg(src, temp, src->type());
920 reg2stack(temp, dest, dest->type());
921 }
922
923
924 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
925 LIR_Address* addr = src->as_address_ptr();
926 LIR_Address* from_addr = src->as_address_ptr();
927
928 if (addr->base()->type() == T_OBJECT) {
929 __ verify_oop(addr->base()->as_pointer_register());
930 }
931
932 if (patch_code != lir_patch_none) {
933 deoptimize_trap(info);
934 return;
935 }
936
937 if (info != nullptr) {
938 add_debug_info_for_null_check_here(info);
939 }
940 int null_check_here = code_offset();
941 switch (type) {
942 case T_FLOAT: {
943 __ ldrs(dest->as_float_reg(), as_Address(from_addr));
944 break;
945 }
946
947 case T_DOUBLE: {
948 __ ldrd(dest->as_double_reg(), as_Address(from_addr));
949 break;
950 }
951
952 case T_ARRAY: // fall through
953 case T_OBJECT: // fall through
954 if (UseCompressedOops && !wide) {
955 __ ldrw(dest->as_register(), as_Address(from_addr));
956 } else {
957 __ ldr(dest->as_register(), as_Address(from_addr));
958 }
959 break;
960 case T_METADATA:
961 // We get here to store a method pointer to the stack to pass to
962 // a dtrace runtime call. This can't work on 64 bit with
963 // compressed klass ptrs: T_METADATA can be a compressed klass
964 // ptr or a 64 bit method pointer.
965 ShouldNotReachHere();
966 __ ldr(dest->as_register(), as_Address(from_addr));
967 break;
968 case T_ADDRESS:
969 __ ldr(dest->as_register(), as_Address(from_addr));
970 break;
971 case T_INT:
972 __ ldrw(dest->as_register(), as_Address(from_addr));
973 break;
974
975 case T_LONG: {
976 __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
977 break;
978 }
979
980 case T_BYTE:
981 __ ldrsb(dest->as_register(), as_Address(from_addr));
982 break;
983 case T_BOOLEAN: {
984 __ ldrb(dest->as_register(), as_Address(from_addr));
985 break;
986 }
987
988 case T_CHAR:
989 __ ldrh(dest->as_register(), as_Address(from_addr));
990 break;
991 case T_SHORT:
992 __ ldrsh(dest->as_register(), as_Address(from_addr));
993 break;
994
995 default:
996 ShouldNotReachHere();
997 }
998
999 if (is_reference_type(type)) {
1000 if (UseCompressedOops && !wide) {
1001 __ decode_heap_oop(dest->as_register());
1002 }
1003
1004 __ verify_oop(dest->as_register());
1005 }
1006 }
1007
1008
1009 int LIR_Assembler::array_element_size(BasicType type) const {
1010 int elem_size = type2aelembytes(type);
1011 return exact_log2(elem_size);
1012 }
1013
1014
1015 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1016 switch (op->code()) {
1017 case lir_idiv:
1018 case lir_irem:
1019 arithmetic_idiv(op->code(),
1020 op->in_opr1(),
1021 op->in_opr2(),
1022 op->in_opr3(),
1023 op->result_opr(),
1024 op->info());
1025 break;
1026 case lir_fmad:
1027 __ fmaddd(op->result_opr()->as_double_reg(),
1028 op->in_opr1()->as_double_reg(),
1029 op->in_opr2()->as_double_reg(),
1030 op->in_opr3()->as_double_reg());
1031 break;
1032 case lir_fmaf:
1033 __ fmadds(op->result_opr()->as_float_reg(),
1034 op->in_opr1()->as_float_reg(),
1035 op->in_opr2()->as_float_reg(),
1036 op->in_opr3()->as_float_reg());
1037 break;
1038 default: ShouldNotReachHere(); break;
1039 }
1040 }
1041
1042 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1043 #ifdef ASSERT
1044 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
1045 if (op->block() != nullptr) _branch_target_blocks.append(op->block());
1046 if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
1047 #endif
1048
1049 if (op->cond() == lir_cond_always) {
1050 if (op->info() != nullptr) add_debug_info_for_branch(op->info());
1051 __ b(*(op->label()));
1052 } else {
1053 Assembler::Condition acond;
1054 if (op->code() == lir_cond_float_branch) {
1055 bool is_unordered = (op->ublock() == op->block());
1056 // Assembler::EQ does not permit unordered branches, so we add
1057 // another branch here. Likewise, Assembler::NE does not permit
1058 // ordered branches.
1059 if ((is_unordered && op->cond() == lir_cond_equal)
1060 || (!is_unordered && op->cond() == lir_cond_notEqual))
1061 __ br(Assembler::VS, *(op->ublock()->label()));
1062 switch(op->cond()) {
1063 case lir_cond_equal: acond = Assembler::EQ; break;
1064 case lir_cond_notEqual: acond = Assembler::NE; break;
1065 case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
1066 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
1067 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
1068 case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
1069 default: ShouldNotReachHere();
1070 acond = Assembler::EQ; // unreachable
1071 }
1072 } else {
1073 switch (op->cond()) {
1074 case lir_cond_equal: acond = Assembler::EQ; break;
1075 case lir_cond_notEqual: acond = Assembler::NE; break;
1076 case lir_cond_less: acond = Assembler::LT; break;
1077 case lir_cond_lessEqual: acond = Assembler::LE; break;
1078 case lir_cond_greaterEqual: acond = Assembler::GE; break;
1079 case lir_cond_greater: acond = Assembler::GT; break;
1080 case lir_cond_belowEqual: acond = Assembler::LS; break;
1081 case lir_cond_aboveEqual: acond = Assembler::HS; break;
1082 default: ShouldNotReachHere();
1083 acond = Assembler::EQ; // unreachable
1084 }
1085 }
1086 __ br(acond,*(op->label()));
1087 }
1088 }
1089
1090
1091
1092 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1093 LIR_Opr src = op->in_opr();
1094 LIR_Opr dest = op->result_opr();
1095
1096 switch (op->bytecode()) {
1097 case Bytecodes::_i2f:
1098 {
1099 __ scvtfws(dest->as_float_reg(), src->as_register());
1100 break;
1101 }
1102 case Bytecodes::_i2d:
1103 {
1104 __ scvtfwd(dest->as_double_reg(), src->as_register());
1105 break;
1106 }
1107 case Bytecodes::_l2d:
1108 {
1109 __ scvtfd(dest->as_double_reg(), src->as_register_lo());
1110 break;
1111 }
1112 case Bytecodes::_l2f:
1113 {
1114 __ scvtfs(dest->as_float_reg(), src->as_register_lo());
1115 break;
1116 }
1117 case Bytecodes::_f2d:
1118 {
1119 __ fcvts(dest->as_double_reg(), src->as_float_reg());
1120 break;
1121 }
1122 case Bytecodes::_d2f:
1123 {
1124 __ fcvtd(dest->as_float_reg(), src->as_double_reg());
1125 break;
1126 }
1127 case Bytecodes::_i2c:
1128 {
1129 __ ubfx(dest->as_register(), src->as_register(), 0, 16);
1130 break;
1131 }
1132 case Bytecodes::_i2l:
1133 {
1134 __ sxtw(dest->as_register_lo(), src->as_register());
1135 break;
1136 }
1137 case Bytecodes::_i2s:
1138 {
1139 __ sxth(dest->as_register(), src->as_register());
1140 break;
1141 }
1142 case Bytecodes::_i2b:
1143 {
1144 __ sxtb(dest->as_register(), src->as_register());
1145 break;
1146 }
1147 case Bytecodes::_l2i:
1148 {
1149 _masm->block_comment("FIXME: This could be a no-op");
1150 __ uxtw(dest->as_register(), src->as_register_lo());
1151 break;
1152 }
1153 case Bytecodes::_d2l:
1154 {
1155 __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
1156 break;
1157 }
1158 case Bytecodes::_f2i:
1159 {
1160 __ fcvtzsw(dest->as_register(), src->as_float_reg());
1161 break;
1162 }
1163 case Bytecodes::_f2l:
1164 {
1165 __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
1166 break;
1167 }
1168 case Bytecodes::_d2i:
1169 {
1170 __ fcvtzdw(dest->as_register(), src->as_double_reg());
1171 break;
1172 }
1173 default: ShouldNotReachHere();
1174 }
1175 }
1176
1177 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1178 if (op->init_check()) {
1179 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1180 __ ldarb(rscratch1, rscratch1);
1181 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1182 add_debug_info_for_null_check_here(op->stub()->info());
1183 __ br(Assembler::NE, *op->stub()->entry());
1184 }
1185 __ allocate_object(op->obj()->as_register(),
1186 op->tmp1()->as_register(),
1187 op->tmp2()->as_register(),
1188 op->header_size(),
1189 op->object_size(),
1190 op->klass()->as_register(),
1191 *op->stub()->entry());
1192 __ bind(*op->stub()->continuation());
1193 }
1194
1195 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1196 Register len = op->len()->as_register();
1197 __ uxtw(len, len);
1198
1199 if (UseSlowPath ||
1200 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1201 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1202 __ b(*op->stub()->entry());
1203 } else {
1204 Register tmp1 = op->tmp1()->as_register();
1205 Register tmp2 = op->tmp2()->as_register();
1206 Register tmp3 = op->tmp3()->as_register();
1207 if (len == tmp1) {
1208 tmp1 = tmp3;
1209 } else if (len == tmp2) {
1210 tmp2 = tmp3;
1211 } else if (len == tmp3) {
1212 // everything is ok
1213 } else {
1214 __ mov(tmp3, len);
1215 }
1216 __ allocate_array(op->obj()->as_register(),
1217 len,
1218 tmp1,
1219 tmp2,
1220 arrayOopDesc::base_offset_in_bytes(op->type()),
1221 array_element_size(op->type()),
1222 op->klass()->as_register(),
1223 *op->stub()->entry(),
1224 op->zero_array());
1225 }
1226 __ bind(*op->stub()->continuation());
1227 }
1228
1229 void LIR_Assembler::type_profile_helper(Register mdo,
1230 ciMethodData *md, ciProfileData *data,
1231 Register recv, Label* update_done) {
1232
1233 // Given a profile data offset, generate an Address which points to
1234 // the corresponding slot in mdo->data().
1235 // Clobbers rscratch2.
1236 auto slot_at = [=](ByteSize offset) -> Address {
1237 return __ form_address(rscratch2, mdo,
1238 md->byte_offset_of_slot(data, offset),
1239 LogBytesPerWord);
1240 };
1241
1242 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1243 Label next_test;
1244 // See if the receiver is receiver[n].
1245 __ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
1246 __ cmp(recv, rscratch1);
1247 __ br(Assembler::NE, next_test);
1248 __ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
1249 DataLayout::counter_increment);
1250 __ b(*update_done);
1251 __ bind(next_test);
1252 }
1253
1254 // Didn't find receiver; find next empty slot and fill it in
1255 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1256 Label next_test;
1257 Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
1258 __ ldr(rscratch1, recv_addr);
1259 __ cbnz(rscratch1, next_test);
1260 __ str(recv, recv_addr);
1261 __ mov(rscratch1, DataLayout::counter_increment);
1262 __ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
1263 __ b(*update_done);
1264 __ bind(next_test);
1265 }
1266 }
1267
1268 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1269 // we always need a stub for the failure case.
1270 CodeStub* stub = op->stub();
1271 Register obj = op->object()->as_register();
1272 Register k_RInfo = op->tmp1()->as_register();
1273 Register klass_RInfo = op->tmp2()->as_register();
1274 Register dst = op->result_opr()->as_register();
1275 ciKlass* k = op->klass();
1276 Register Rtmp1 = noreg;
1277
1278 // check if it needs to be profiled
1279 ciMethodData* md;
1280 ciProfileData* data;
1281
1282 const bool should_profile = op->should_profile();
1283
1284 if (should_profile) {
1285 ciMethod* method = op->profiled_method();
1286 assert(method != nullptr, "Should have method");
1287 int bci = op->profiled_bci();
1288 md = method->method_data_or_null();
1289 assert(md != nullptr, "Sanity");
1290 data = md->bci_to_data(bci);
1291 assert(data != nullptr, "need data for type check");
1292 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1293 }
1294 Label* success_target = success;
1295 Label* failure_target = failure;
1296
1297 if (obj == k_RInfo) {
1298 k_RInfo = dst;
1299 } else if (obj == klass_RInfo) {
1300 klass_RInfo = dst;
1301 }
1302 if (k->is_loaded() && !UseCompressedClassPointers) {
1303 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1304 } else {
1305 Rtmp1 = op->tmp3()->as_register();
1306 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1307 }
1308
1309 assert_different_registers(obj, k_RInfo, klass_RInfo);
1310
1311 if (should_profile) {
1312 Register mdo = klass_RInfo;
1313 __ mov_metadata(mdo, md->constant_encoding());
1314 Label not_null;
1315 __ cbnz(obj, not_null);
1316 // Object is null; update MDO and exit
1317 Address data_addr
1318 = __ form_address(rscratch2, mdo,
1319 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1320 0);
1321 __ ldrb(rscratch1, data_addr);
1322 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1323 __ strb(rscratch1, data_addr);
1324 __ b(*obj_is_null);
1325 __ bind(not_null);
1326
1327 Label update_done;
1328 Register recv = k_RInfo;
1329 __ load_klass(recv, obj);
1330 type_profile_helper(mdo, md, data, recv, &update_done);
1331 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1332 __ addptr(counter_addr, DataLayout::counter_increment);
1333
1334 __ bind(update_done);
1335 } else {
1336 __ cbz(obj, *obj_is_null);
1337 }
1338
1339 if (!k->is_loaded()) {
1340 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1341 } else {
1342 __ mov_metadata(k_RInfo, k->constant_encoding());
1343 }
1344 __ verify_oop(obj);
1345
1346 if (op->fast_check()) {
1347 // get object class
1348 // not a safepoint as obj null check happens earlier
1349 __ load_klass(rscratch1, obj);
1350 __ cmp( rscratch1, k_RInfo);
1351
1352 __ br(Assembler::NE, *failure_target);
1353 // successful cast, fall through to profile or jump
1354 } else {
1355 // get object class
1356 // not a safepoint as obj null check happens earlier
1357 __ load_klass(klass_RInfo, obj);
1358 if (k->is_loaded()) {
1359 // See if we get an immediate positive hit
1360 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1361 __ cmp(k_RInfo, rscratch1);
1362 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1363 __ br(Assembler::NE, *failure_target);
1364 // successful cast, fall through to profile or jump
1365 } else {
1366 // See if we get an immediate positive hit
1367 __ br(Assembler::EQ, *success_target);
1368 // check for self
1369 __ cmp(klass_RInfo, k_RInfo);
1370 __ br(Assembler::EQ, *success_target);
1371
1372 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1373 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1374 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1375 // result is a boolean
1376 __ cbzw(klass_RInfo, *failure_target);
1377 // successful cast, fall through to profile or jump
1378 }
1379 } else {
1380 // perform the fast part of the checking logic
1381 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1382 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1383 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1384 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1385 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1386 // result is a boolean
1387 __ cbz(k_RInfo, *failure_target);
1388 // successful cast, fall through to profile or jump
1389 }
1390 }
1391 __ b(*success);
1392 }
1393
1394
1395 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1396 const bool should_profile = op->should_profile();
1397
1398 LIR_Code code = op->code();
1399 if (code == lir_store_check) {
1400 Register value = op->object()->as_register();
1401 Register array = op->array()->as_register();
1402 Register k_RInfo = op->tmp1()->as_register();
1403 Register klass_RInfo = op->tmp2()->as_register();
1404 Register Rtmp1 = op->tmp3()->as_register();
1405
1406 CodeStub* stub = op->stub();
1407
1408 // check if it needs to be profiled
1409 ciMethodData* md;
1410 ciProfileData* data;
1411
1412 if (should_profile) {
1413 ciMethod* method = op->profiled_method();
1414 assert(method != nullptr, "Should have method");
1415 int bci = op->profiled_bci();
1416 md = method->method_data_or_null();
1417 assert(md != nullptr, "Sanity");
1418 data = md->bci_to_data(bci);
1419 assert(data != nullptr, "need data for type check");
1420 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1421 }
1422 Label done;
1423 Label* success_target = &done;
1424 Label* failure_target = stub->entry();
1425
1426 if (should_profile) {
1427 Label not_null;
1428 Register mdo = klass_RInfo;
1429 __ mov_metadata(mdo, md->constant_encoding());
1430 __ cbnz(value, not_null);
1431 // Object is null; update MDO and exit
1432 Address data_addr
1433 = __ form_address(rscratch2, mdo,
1434 md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
1435 __ ldrb(rscratch1, data_addr);
1436 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1437 __ strb(rscratch1, data_addr);
1438 __ b(done);
1439 __ bind(not_null);
1440
1441 Label update_done;
1442 Register recv = k_RInfo;
1443 __ load_klass(recv, value);
1444 type_profile_helper(mdo, md, data, recv, &update_done);
1445 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1446 __ addptr(counter_addr, DataLayout::counter_increment);
1447 __ bind(update_done);
1448 } else {
1449 __ cbz(value, done);
1450 }
1451
1452 add_debug_info_for_null_check_here(op->info_for_exception());
1453 __ load_klass(k_RInfo, array);
1454 __ load_klass(klass_RInfo, value);
1455
1456 // get instance klass (it's already uncompressed)
1457 __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1458 // perform the fast part of the checking logic
1459 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1460 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1461 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1462 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1463 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1464 // result is a boolean
1465 __ cbzw(k_RInfo, *failure_target);
1466 // fall through to the success case
1467
1468 __ bind(done);
1469 } else if (code == lir_checkcast) {
1470 Register obj = op->object()->as_register();
1471 Register dst = op->result_opr()->as_register();
1472 Label success;
1473 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1474 __ bind(success);
1475 if (dst != obj) {
1476 __ mov(dst, obj);
1477 }
1478 } else if (code == lir_instanceof) {
1479 Register obj = op->object()->as_register();
1480 Register dst = op->result_opr()->as_register();
1481 Label success, failure, done;
1482 emit_typecheck_helper(op, &success, &failure, &failure);
1483 __ bind(failure);
1484 __ mov(dst, zr);
1485 __ b(done);
1486 __ bind(success);
1487 __ mov(dst, 1);
1488 __ bind(done);
1489 } else {
1490 ShouldNotReachHere();
1491 }
1492 }
1493
1494 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1495 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1496 __ cset(rscratch1, Assembler::NE);
1497 __ membar(__ AnyAny);
1498 }
1499
1500 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1501 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1502 __ cset(rscratch1, Assembler::NE);
1503 __ membar(__ AnyAny);
1504 }
1505
1506
1507 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1508 Register addr;
1509 if (op->addr()->is_register()) {
1510 addr = as_reg(op->addr());
1511 } else {
1512 assert(op->addr()->is_address(), "what else?");
1513 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1514 assert(addr_ptr->disp() == 0, "need 0 disp");
1515 assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1516 addr = as_reg(addr_ptr->base());
1517 }
1518 Register newval = as_reg(op->new_value());
1519 Register cmpval = as_reg(op->cmp_value());
1520
1521 if (op->code() == lir_cas_obj) {
1522 if (UseCompressedOops) {
1523 Register t1 = op->tmp1()->as_register();
1524 assert(op->tmp1()->is_valid(), "must be");
1525 __ encode_heap_oop(t1, cmpval);
1526 cmpval = t1;
1527 __ encode_heap_oop(rscratch2, newval);
1528 newval = rscratch2;
1529 casw(addr, newval, cmpval);
1530 } else {
1531 casl(addr, newval, cmpval);
1532 }
1533 } else if (op->code() == lir_cas_int) {
1534 casw(addr, newval, cmpval);
1535 } else {
1536 casl(addr, newval, cmpval);
1537 }
1538 }
1539
1540
1541 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1542 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1543 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on aarch64");
1544
1545 Assembler::Condition acond, ncond;
1546 switch (condition) {
1547 case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;
1548 case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;
1549 case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;
1550 case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
1551 case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1552 case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
1553 case lir_cond_belowEqual:
1554 case lir_cond_aboveEqual:
1555 default: ShouldNotReachHere();
1556 acond = Assembler::EQ; ncond = Assembler::NE; // unreachable
1557 }
1558
1559 assert(result->is_single_cpu() || result->is_double_cpu(),
1560 "expect single register for result");
1561 if (opr1->is_constant() && opr2->is_constant()
1562 && opr1->type() == T_INT && opr2->type() == T_INT) {
1563 jint val1 = opr1->as_jint();
1564 jint val2 = opr2->as_jint();
1565 if (val1 == 0 && val2 == 1) {
1566 __ cset(result->as_register(), ncond);
1567 return;
1568 } else if (val1 == 1 && val2 == 0) {
1569 __ cset(result->as_register(), acond);
1570 return;
1571 }
1572 }
1573
1574 if (opr1->is_constant() && opr2->is_constant()
1575 && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1576 jlong val1 = opr1->as_jlong();
1577 jlong val2 = opr2->as_jlong();
1578 if (val1 == 0 && val2 == 1) {
1579 __ cset(result->as_register_lo(), ncond);
1580 return;
1581 } else if (val1 == 1 && val2 == 0) {
1582 __ cset(result->as_register_lo(), acond);
1583 return;
1584 }
1585 }
1586
1587 if (opr1->is_stack()) {
1588 stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1589 opr1 = FrameMap::rscratch1_opr;
1590 } else if (opr1->is_constant()) {
1591 LIR_Opr tmp
1592 = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1593 const2reg(opr1, tmp, lir_patch_none, nullptr);
1594 opr1 = tmp;
1595 }
1596
1597 if (opr2->is_stack()) {
1598 stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1599 opr2 = FrameMap::rscratch2_opr;
1600 } else if (opr2->is_constant()) {
1601 LIR_Opr tmp
1602 = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1603 const2reg(opr2, tmp, lir_patch_none, nullptr);
1604 opr2 = tmp;
1605 }
1606
1607 if (result->type() == T_LONG)
1608 __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1609 else
1610 __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1611 }
1612
1613 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1614 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1615
1616 if (left->is_single_cpu()) {
1617 Register lreg = left->as_register();
1618 Register dreg = as_reg(dest);
1619
1620 if (right->is_single_cpu()) {
1621 // cpu register - cpu register
1622
1623 assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1624 "should be");
1625 Register rreg = right->as_register();
1626 switch (code) {
1627 case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1628 case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1629 case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1630 default: ShouldNotReachHere();
1631 }
1632
1633 } else if (right->is_double_cpu()) {
1634 Register rreg = right->as_register_lo();
1635 // single_cpu + double_cpu: can happen with obj+long
1636 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1637 switch (code) {
1638 case lir_add: __ add(dreg, lreg, rreg); break;
1639 case lir_sub: __ sub(dreg, lreg, rreg); break;
1640 default: ShouldNotReachHere();
1641 }
1642 } else if (right->is_constant()) {
1643 // cpu register - constant
1644 jlong c;
1645
1646 // FIXME. This is fugly: we really need to factor all this logic.
1647 switch(right->type()) {
1648 case T_LONG:
1649 c = right->as_constant_ptr()->as_jlong();
1650 break;
1651 case T_INT:
1652 case T_ADDRESS:
1653 c = right->as_constant_ptr()->as_jint();
1654 break;
1655 default:
1656 ShouldNotReachHere();
1657 c = 0; // unreachable
1658 break;
1659 }
1660
1661 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1662 if (c == 0 && dreg == lreg) {
1663 COMMENT("effective nop elided");
1664 return;
1665 }
1666 switch(left->type()) {
1667 case T_INT:
1668 switch (code) {
1669 case lir_add: __ addw(dreg, lreg, c); break;
1670 case lir_sub: __ subw(dreg, lreg, c); break;
1671 default: ShouldNotReachHere();
1672 }
1673 break;
1674 case T_OBJECT:
1675 case T_ADDRESS:
1676 switch (code) {
1677 case lir_add: __ add(dreg, lreg, c); break;
1678 case lir_sub: __ sub(dreg, lreg, c); break;
1679 default: ShouldNotReachHere();
1680 }
1681 break;
1682 default:
1683 ShouldNotReachHere();
1684 }
1685 } else {
1686 ShouldNotReachHere();
1687 }
1688
1689 } else if (left->is_double_cpu()) {
1690 Register lreg_lo = left->as_register_lo();
1691
1692 if (right->is_double_cpu()) {
1693 // cpu register - cpu register
1694 Register rreg_lo = right->as_register_lo();
1695 switch (code) {
1696 case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1697 case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1698 case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1699 case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1700 case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1701 default:
1702 ShouldNotReachHere();
1703 }
1704
1705 } else if (right->is_constant()) {
1706 jlong c = right->as_constant_ptr()->as_jlong();
1707 Register dreg = as_reg(dest);
1708 switch (code) {
1709 case lir_add:
1710 case lir_sub:
1711 if (c == 0 && dreg == lreg_lo) {
1712 COMMENT("effective nop elided");
1713 return;
1714 }
1715 code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1716 break;
1717 case lir_div:
1718 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1719 if (c == 1) {
1720 // move lreg_lo to dreg if divisor is 1
1721 __ mov(dreg, lreg_lo);
1722 } else {
1723 unsigned int shift = log2i_exact(c);
1724 // use rscratch1 as intermediate result register
1725 __ asr(rscratch1, lreg_lo, 63);
1726 __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1727 __ asr(dreg, rscratch1, shift);
1728 }
1729 break;
1730 case lir_rem:
1731 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1732 if (c == 1) {
1733 // move 0 to dreg if divisor is 1
1734 __ mov(dreg, zr);
1735 } else {
1736 // use rscratch1 as intermediate result register
1737 __ negs(rscratch1, lreg_lo);
1738 __ andr(dreg, lreg_lo, c - 1);
1739 __ andr(rscratch1, rscratch1, c - 1);
1740 __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1741 }
1742 break;
1743 default:
1744 ShouldNotReachHere();
1745 }
1746 } else {
1747 ShouldNotReachHere();
1748 }
1749 } else if (left->is_single_fpu()) {
1750 assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1751 switch (code) {
1752 case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1753 case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1754 case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1755 case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1756 default:
1757 ShouldNotReachHere();
1758 }
1759 } else if (left->is_double_fpu()) {
1760 if (right->is_double_fpu()) {
1761 // fpu register - fpu register
1762 switch (code) {
1763 case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1764 case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1765 case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1766 case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1767 default:
1768 ShouldNotReachHere();
1769 }
1770 } else {
1771 if (right->is_constant()) {
1772 ShouldNotReachHere();
1773 }
1774 ShouldNotReachHere();
1775 }
1776 } else if (left->is_single_stack() || left->is_address()) {
1777 assert(left == dest, "left and dest must be equal");
1778 ShouldNotReachHere();
1779 } else {
1780 ShouldNotReachHere();
1781 }
1782 }
1783
1784 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1785 switch(code) {
1786 case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1787 case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1788 case lir_f2hf: __ flt_to_flt16(dest->as_register(), value->as_float_reg(), tmp->as_float_reg()); break;
1789 case lir_hf2f: __ flt16_to_flt(dest->as_float_reg(), value->as_register(), tmp->as_float_reg()); break;
1790 default : ShouldNotReachHere();
1791 }
1792 }
1793
1794 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1795
1796 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1797 Register Rleft = left->is_single_cpu() ? left->as_register() :
1798 left->as_register_lo();
1799 if (dst->is_single_cpu()) {
1800 Register Rdst = dst->as_register();
1801 if (right->is_constant()) {
1802 switch (code) {
1803 case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1804 case lir_logic_or: __ orrw (Rdst, Rleft, right->as_jint()); break;
1805 case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1806 default: ShouldNotReachHere(); break;
1807 }
1808 } else {
1809 Register Rright = right->is_single_cpu() ? right->as_register() :
1810 right->as_register_lo();
1811 switch (code) {
1812 case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1813 case lir_logic_or: __ orrw (Rdst, Rleft, Rright); break;
1814 case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1815 default: ShouldNotReachHere(); break;
1816 }
1817 }
1818 } else {
1819 Register Rdst = dst->as_register_lo();
1820 if (right->is_constant()) {
1821 switch (code) {
1822 case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1823 case lir_logic_or: __ orr (Rdst, Rleft, right->as_jlong()); break;
1824 case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1825 default: ShouldNotReachHere(); break;
1826 }
1827 } else {
1828 Register Rright = right->is_single_cpu() ? right->as_register() :
1829 right->as_register_lo();
1830 switch (code) {
1831 case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1832 case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;
1833 case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1834 default: ShouldNotReachHere(); break;
1835 }
1836 }
1837 }
1838 }
1839
1840
1841
1842 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1843
1844 // opcode check
1845 assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1846 bool is_irem = (code == lir_irem);
1847
1848 // operand check
1849 assert(left->is_single_cpu(), "left must be register");
1850 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
1851 assert(result->is_single_cpu(), "result must be register");
1852 Register lreg = left->as_register();
1853 Register dreg = result->as_register();
1854
1855 // power-of-2 constant check and codegen
1856 if (right->is_constant()) {
1857 int c = right->as_constant_ptr()->as_jint();
1858 assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1859 if (is_irem) {
1860 if (c == 1) {
1861 // move 0 to dreg if divisor is 1
1862 __ movw(dreg, zr);
1863 } else {
1864 // use rscratch1 as intermediate result register
1865 __ negsw(rscratch1, lreg);
1866 __ andw(dreg, lreg, c - 1);
1867 __ andw(rscratch1, rscratch1, c - 1);
1868 __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1869 }
1870 } else {
1871 if (c == 1) {
1872 // move lreg to dreg if divisor is 1
1873 __ movw(dreg, lreg);
1874 } else {
1875 unsigned int shift = exact_log2(c);
1876 // use rscratch1 as intermediate result register
1877 __ asrw(rscratch1, lreg, 31);
1878 __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1879 __ asrw(dreg, rscratch1, shift);
1880 }
1881 }
1882 } else {
1883 Register rreg = right->as_register();
1884 __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1885 }
1886 }
1887
1888
1889 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1890 if (opr1->is_constant() && opr2->is_single_cpu()) {
1891 // tableswitch
1892 Register reg = as_reg(opr2);
1893 struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1894 __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1895 } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1896 Register reg1 = as_reg(opr1);
1897 if (opr2->is_single_cpu()) {
1898 // cpu register - cpu register
1899 Register reg2 = opr2->as_register();
1900 if (is_reference_type(opr1->type())) {
1901 __ cmpoop(reg1, reg2);
1902 } else {
1903 assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1904 __ cmpw(reg1, reg2);
1905 }
1906 return;
1907 }
1908 if (opr2->is_double_cpu()) {
1909 // cpu register - cpu register
1910 Register reg2 = opr2->as_register_lo();
1911 __ cmp(reg1, reg2);
1912 return;
1913 }
1914
1915 if (opr2->is_constant()) {
1916 bool is_32bit = false; // width of register operand
1917 jlong imm;
1918
1919 switch(opr2->type()) {
1920 case T_INT:
1921 imm = opr2->as_constant_ptr()->as_jint();
1922 is_32bit = true;
1923 break;
1924 case T_LONG:
1925 imm = opr2->as_constant_ptr()->as_jlong();
1926 break;
1927 case T_ADDRESS:
1928 imm = opr2->as_constant_ptr()->as_jint();
1929 break;
1930 case T_METADATA:
1931 imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1932 break;
1933 case T_OBJECT:
1934 case T_ARRAY:
1935 jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1936 __ cmpoop(reg1, rscratch1);
1937 return;
1938 default:
1939 ShouldNotReachHere();
1940 imm = 0; // unreachable
1941 break;
1942 }
1943
1944 if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1945 if (is_32bit)
1946 __ cmpw(reg1, imm);
1947 else
1948 __ subs(zr, reg1, imm);
1949 return;
1950 } else {
1951 __ mov(rscratch1, imm);
1952 if (is_32bit)
1953 __ cmpw(reg1, rscratch1);
1954 else
1955 __ cmp(reg1, rscratch1);
1956 return;
1957 }
1958 } else
1959 ShouldNotReachHere();
1960 } else if (opr1->is_single_fpu()) {
1961 FloatRegister reg1 = opr1->as_float_reg();
1962 assert(opr2->is_single_fpu(), "expect single float register");
1963 FloatRegister reg2 = opr2->as_float_reg();
1964 __ fcmps(reg1, reg2);
1965 } else if (opr1->is_double_fpu()) {
1966 FloatRegister reg1 = opr1->as_double_reg();
1967 assert(opr2->is_double_fpu(), "expect double float register");
1968 FloatRegister reg2 = opr2->as_double_reg();
1969 __ fcmpd(reg1, reg2);
1970 } else {
1971 ShouldNotReachHere();
1972 }
1973 }
1974
1975 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1976 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1977 bool is_unordered_less = (code == lir_ucmp_fd2i);
1978 if (left->is_single_fpu()) {
1979 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1980 } else if (left->is_double_fpu()) {
1981 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1982 } else {
1983 ShouldNotReachHere();
1984 }
1985 } else if (code == lir_cmp_l2i) {
1986 Label done;
1987 __ cmp(left->as_register_lo(), right->as_register_lo());
1988 __ mov(dst->as_register(), (uint64_t)-1L);
1989 __ br(Assembler::LT, done);
1990 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1991 __ bind(done);
1992 } else {
1993 ShouldNotReachHere();
1994 }
1995 }
1996
1997
1998 void LIR_Assembler::align_call(LIR_Code code) { }
1999
2000
2001 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2002 address call = __ trampoline_call(Address(op->addr(), rtype));
2003 if (call == nullptr) {
2004 bailout("trampoline stub overflow");
2005 return;
2006 }
2007 add_call_info(code_offset(), op->info());
2008 __ post_call_nop();
2009 }
2010
2011
2012 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2013 address call = __ ic_call(op->addr());
2014 if (call == nullptr) {
2015 bailout("trampoline stub overflow");
2016 return;
2017 }
2018 add_call_info(code_offset(), op->info());
2019 __ post_call_nop();
2020 }
2021
2022 void LIR_Assembler::emit_static_call_stub() {
2023 address call_pc = __ pc();
2024 address stub = __ start_a_stub(call_stub_size());
2025 if (stub == nullptr) {
2026 bailout("static call stub overflow");
2027 return;
2028 }
2029
2030 int start = __ offset();
2031
2032 __ relocate(static_stub_Relocation::spec(call_pc));
2033 __ emit_static_call_stub();
2034
2035 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2036 <= call_stub_size(), "stub too big");
2037 __ end_a_stub();
2038 }
2039
2040
2041 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2042 assert(exceptionOop->as_register() == r0, "must match");
2043 assert(exceptionPC->as_register() == r3, "must match");
2044
2045 // exception object is not added to oop map by LinearScan
2046 // (LinearScan assumes that no oops are in fixed registers)
2047 info->add_register_oop(exceptionOop);
2048 StubId unwind_id;
2049
2050 // get current pc information
2051 // pc is only needed if the method has an exception handler, the unwind code does not need it.
2052 if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2053 // As no instructions have been generated yet for this LIR node it's
2054 // possible that an oop map already exists for the current offset.
2055 // In that case insert an dummy NOP here to ensure all oop map PCs
2056 // are unique. See JDK-8237483.
2057 __ nop();
2058 }
2059 int pc_for_athrow_offset = __ offset();
2060 InternalAddress pc_for_athrow(__ pc());
2061 __ adr(exceptionPC->as_register(), pc_for_athrow);
2062 add_call_info(pc_for_athrow_offset, info); // for exception handler
2063
2064 __ verify_not_null_oop(r0);
2065 // search an exception handler (r0: exception oop, r3: throwing pc)
2066 if (compilation()->has_fpu_code()) {
2067 unwind_id = StubId::c1_handle_exception_id;
2068 } else {
2069 unwind_id = StubId::c1_handle_exception_nofpu_id;
2070 }
2071 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2072
2073 // FIXME: enough room for two byte trap ????
2074 __ nop();
2075 }
2076
2077
2078 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2079 assert(exceptionOop->as_register() == r0, "must match");
2080
2081 __ b(_unwind_handler_entry);
2082 }
2083
2084
2085 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2086 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2087 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2088
2089 switch (left->type()) {
2090 case T_INT: {
2091 switch (code) {
2092 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;
2093 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;
2094 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2095 default:
2096 ShouldNotReachHere();
2097 break;
2098 }
2099 break;
2100 case T_LONG:
2101 case T_ADDRESS:
2102 case T_OBJECT:
2103 switch (code) {
2104 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
2105 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
2106 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2107 default:
2108 ShouldNotReachHere();
2109 break;
2110 }
2111 break;
2112 default:
2113 ShouldNotReachHere();
2114 break;
2115 }
2116 }
2117 }
2118
2119
2120 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2121 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2122 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2123
2124 switch (left->type()) {
2125 case T_INT: {
2126 switch (code) {
2127 case lir_shl: __ lslw (dreg, lreg, count); break;
2128 case lir_shr: __ asrw (dreg, lreg, count); break;
2129 case lir_ushr: __ lsrw (dreg, lreg, count); break;
2130 default:
2131 ShouldNotReachHere();
2132 break;
2133 }
2134 break;
2135 case T_LONG:
2136 case T_ADDRESS:
2137 case T_OBJECT:
2138 switch (code) {
2139 case lir_shl: __ lsl (dreg, lreg, count); break;
2140 case lir_shr: __ asr (dreg, lreg, count); break;
2141 case lir_ushr: __ lsr (dreg, lreg, count); break;
2142 default:
2143 ShouldNotReachHere();
2144 break;
2145 }
2146 break;
2147 default:
2148 ShouldNotReachHere();
2149 break;
2150 }
2151 }
2152 }
2153
2154
2155 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2156 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2157 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2158 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2159 __ str (r, Address(sp, offset_from_rsp_in_bytes));
2160 }
2161
2162
2163 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2164 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2165 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2166 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2167 __ mov (rscratch1, c);
2168 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2169 }
2170
2171
2172 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2173 ShouldNotReachHere();
2174 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2175 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2176 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2177 __ lea(rscratch1, __ constant_oop_address(o));
2178 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2179 }
2180
2181
2182 // This code replaces a call to arraycopy; no exception may
2183 // be thrown in this code, they must be thrown in the System.arraycopy
2184 // activation frame; we could save some checks if this would not be the case
2185 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2186 ciArrayKlass* default_type = op->expected_type();
2187 Register src = op->src()->as_register();
2188 Register dst = op->dst()->as_register();
2189 Register src_pos = op->src_pos()->as_register();
2190 Register dst_pos = op->dst_pos()->as_register();
2191 Register length = op->length()->as_register();
2192 Register tmp = op->tmp()->as_register();
2193
2194 CodeStub* stub = op->stub();
2195 int flags = op->flags();
2196 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2197 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2198
2199 // if we don't know anything, just go through the generic arraycopy
2200 if (default_type == nullptr // || basic_type == T_OBJECT
2201 ) {
2202 Label done;
2203 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2204
2205 // Save the arguments in case the generic arraycopy fails and we
2206 // have to fall back to the JNI stub
2207 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2208 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2209 __ str(src, Address(sp, 4*BytesPerWord));
2210
2211 address copyfunc_addr = StubRoutines::generic_arraycopy();
2212 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2213
2214 // The arguments are in java calling convention so we shift them
2215 // to C convention
2216 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2217 __ mov(c_rarg0, j_rarg0);
2218 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2219 __ mov(c_rarg1, j_rarg1);
2220 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2221 __ mov(c_rarg2, j_rarg2);
2222 assert_different_registers(c_rarg3, j_rarg4);
2223 __ mov(c_rarg3, j_rarg3);
2224 __ mov(c_rarg4, j_rarg4);
2225 #ifndef PRODUCT
2226 if (PrintC1Statistics) {
2227 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2228 }
2229 #endif
2230 __ far_call(RuntimeAddress(copyfunc_addr));
2231
2232 __ cbz(r0, *stub->continuation());
2233
2234 // Reload values from the stack so they are where the stub
2235 // expects them.
2236 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2237 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2238 __ ldr(src, Address(sp, 4*BytesPerWord));
2239
2240 // r0 is -1^K where K == partial copied count
2241 __ eonw(rscratch1, r0, zr);
2242 // adjust length down and src/end pos up by partial copied count
2243 __ subw(length, length, rscratch1);
2244 __ addw(src_pos, src_pos, rscratch1);
2245 __ addw(dst_pos, dst_pos, rscratch1);
2246 __ b(*stub->entry());
2247
2248 __ bind(*stub->continuation());
2249 return;
2250 }
2251
2252 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2253
2254 int elem_size = type2aelembytes(basic_type);
2255 int scale = exact_log2(elem_size);
2256
2257 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2258 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2259
2260 // test for null
2261 if (flags & LIR_OpArrayCopy::src_null_check) {
2262 __ cbz(src, *stub->entry());
2263 }
2264 if (flags & LIR_OpArrayCopy::dst_null_check) {
2265 __ cbz(dst, *stub->entry());
2266 }
2267
2268 // If the compiler was not able to prove that exact type of the source or the destination
2269 // of the arraycopy is an array type, check at runtime if the source or the destination is
2270 // an instance type.
2271 if (flags & LIR_OpArrayCopy::type_check) {
2272 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2273 __ load_klass(tmp, dst);
2274 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2275 __ cmpw(rscratch1, Klass::_lh_neutral_value);
2276 __ br(Assembler::GE, *stub->entry());
2277 }
2278
2279 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2280 __ load_klass(tmp, src);
2281 __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2282 __ cmpw(rscratch1, Klass::_lh_neutral_value);
2283 __ br(Assembler::GE, *stub->entry());
2284 }
2285 }
2286
2287 // check if negative
2288 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2289 __ cmpw(src_pos, 0);
2290 __ br(Assembler::LT, *stub->entry());
2291 }
2292 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2293 __ cmpw(dst_pos, 0);
2294 __ br(Assembler::LT, *stub->entry());
2295 }
2296
2297 if (flags & LIR_OpArrayCopy::length_positive_check) {
2298 __ cmpw(length, 0);
2299 __ br(Assembler::LT, *stub->entry());
2300 }
2301
2302 if (flags & LIR_OpArrayCopy::src_range_check) {
2303 __ addw(tmp, src_pos, length);
2304 __ ldrw(rscratch1, src_length_addr);
2305 __ cmpw(tmp, rscratch1);
2306 __ br(Assembler::HI, *stub->entry());
2307 }
2308 if (flags & LIR_OpArrayCopy::dst_range_check) {
2309 __ addw(tmp, dst_pos, length);
2310 __ ldrw(rscratch1, dst_length_addr);
2311 __ cmpw(tmp, rscratch1);
2312 __ br(Assembler::HI, *stub->entry());
2313 }
2314
2315 if (flags & LIR_OpArrayCopy::type_check) {
2316 // We don't know the array types are compatible
2317 if (basic_type != T_OBJECT) {
2318 // Simple test for basic type arrays
2319 __ cmp_klasses_from_objects(src, dst, tmp, rscratch1);
2320 __ br(Assembler::NE, *stub->entry());
2321 } else {
2322 // For object arrays, if src is a sub class of dst then we can
2323 // safely do the copy.
2324 Label cont, slow;
2325
2326 #define PUSH(r1, r2) \
2327 stp(r1, r2, __ pre(sp, -2 * wordSize));
2328
2329 #define POP(r1, r2) \
2330 ldp(r1, r2, __ post(sp, 2 * wordSize));
2331
2332 __ PUSH(src, dst);
2333
2334 __ load_klass(src, src);
2335 __ load_klass(dst, dst);
2336
2337 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2338
2339 __ PUSH(src, dst);
2340 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2341 __ POP(src, dst);
2342
2343 __ cbnz(src, cont);
2344
2345 __ bind(slow);
2346 __ POP(src, dst);
2347
2348 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2349 if (copyfunc_addr != nullptr) { // use stub if available
2350 // src is not a sub class of dst so we have to do a
2351 // per-element check.
2352
2353 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2354 if ((flags & mask) != mask) {
2355 // Check that at least both of them object arrays.
2356 assert(flags & mask, "one of the two should be known to be an object array");
2357
2358 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2359 __ load_klass(tmp, src);
2360 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2361 __ load_klass(tmp, dst);
2362 }
2363 int lh_offset = in_bytes(Klass::layout_helper_offset());
2364 Address klass_lh_addr(tmp, lh_offset);
2365 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2366 __ ldrw(rscratch1, klass_lh_addr);
2367 __ mov(rscratch2, objArray_lh);
2368 __ eorw(rscratch1, rscratch1, rscratch2);
2369 __ cbnzw(rscratch1, *stub->entry());
2370 }
2371
2372 // Spill because stubs can use any register they like and it's
2373 // easier to restore just those that we care about.
2374 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2375 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2376 __ str(src, Address(sp, 4*BytesPerWord));
2377
2378 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2379 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2380 assert_different_registers(c_rarg0, dst, dst_pos, length);
2381 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2382 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2383 assert_different_registers(c_rarg1, dst, length);
2384 __ uxtw(c_rarg2, length);
2385 assert_different_registers(c_rarg2, dst);
2386
2387 __ load_klass(c_rarg4, dst);
2388 __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2389 __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2390 __ far_call(RuntimeAddress(copyfunc_addr));
2391
2392 #ifndef PRODUCT
2393 if (PrintC1Statistics) {
2394 Label failed;
2395 __ cbnz(r0, failed);
2396 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2397 __ bind(failed);
2398 }
2399 #endif
2400
2401 __ cbz(r0, *stub->continuation());
2402
2403 #ifndef PRODUCT
2404 if (PrintC1Statistics) {
2405 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2406 }
2407 #endif
2408 assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2409
2410 // Restore previously spilled arguments
2411 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2412 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2413 __ ldr(src, Address(sp, 4*BytesPerWord));
2414
2415 // return value is -1^K where K is partial copied count
2416 __ eonw(rscratch1, r0, zr);
2417 // adjust length down and src/end pos up by partial copied count
2418 __ subw(length, length, rscratch1);
2419 __ addw(src_pos, src_pos, rscratch1);
2420 __ addw(dst_pos, dst_pos, rscratch1);
2421 }
2422
2423 __ b(*stub->entry());
2424
2425 __ bind(cont);
2426 __ POP(src, dst);
2427 }
2428 }
2429
2430 #ifdef ASSERT
2431 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2432 // Sanity check the known type with the incoming class. For the
2433 // primitive case the types must match exactly with src.klass and
2434 // dst.klass each exactly matching the default type. For the
2435 // object array case, if no type check is needed then either the
2436 // dst type is exactly the expected type and the src type is a
2437 // subtype which we can't check or src is the same array as dst
2438 // but not necessarily exactly of type default_type.
2439 Label known_ok, halt;
2440 __ mov_metadata(tmp, default_type->constant_encoding());
2441
2442 if (basic_type != T_OBJECT) {
2443 __ cmp_klass(dst, tmp, rscratch1);
2444 __ br(Assembler::NE, halt);
2445 __ cmp_klass(src, tmp, rscratch1);
2446 __ br(Assembler::EQ, known_ok);
2447 } else {
2448 __ cmp_klass(dst, tmp, rscratch1);
2449 __ br(Assembler::EQ, known_ok);
2450 __ cmp(src, dst);
2451 __ br(Assembler::EQ, known_ok);
2452 }
2453 __ bind(halt);
2454 __ stop("incorrect type information in arraycopy");
2455 __ bind(known_ok);
2456 }
2457 #endif
2458
2459 #ifndef PRODUCT
2460 if (PrintC1Statistics) {
2461 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2462 }
2463 #endif
2464
2465 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2466 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2467 assert_different_registers(c_rarg0, dst, dst_pos, length);
2468 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2469 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2470 assert_different_registers(c_rarg1, dst, length);
2471 __ uxtw(c_rarg2, length);
2472 assert_different_registers(c_rarg2, dst);
2473
2474 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2475 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2476 const char *name;
2477 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2478
2479 CodeBlob *cb = CodeCache::find_blob(entry);
2480 if (cb) {
2481 __ far_call(RuntimeAddress(entry));
2482 } else {
2483 __ call_VM_leaf(entry, 3);
2484 }
2485
2486 if (stub != nullptr) {
2487 __ bind(*stub->continuation());
2488 }
2489 }
2490
2491
2492
2493
2494 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2495 Register obj = op->obj_opr()->as_register(); // may not be an oop
2496 Register hdr = op->hdr_opr()->as_register();
2497 Register lock = op->lock_opr()->as_register();
2498 Register temp = op->scratch_opr()->as_register();
2499 if (op->code() == lir_lock) {
2500 // add debug info for NullPointerException only if one is possible
2501 int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
2502 if (op->info() != nullptr) {
2503 add_debug_info_for_null_check(null_check_offset, op->info());
2504 }
2505 // done
2506 } else if (op->code() == lir_unlock) {
2507 __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
2508 } else {
2509 Unimplemented();
2510 }
2511 __ bind(*op->stub()->continuation());
2512 }
2513
2514 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2515 Register obj = op->obj()->as_pointer_register();
2516 Register result = op->result_opr()->as_pointer_register();
2517
2518 CodeEmitInfo* info = op->info();
2519 if (info != nullptr) {
2520 add_debug_info_for_null_check_here(info);
2521 }
2522
2523 __ load_klass(result, obj);
2524 }
2525
2526 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2527 ciMethod* method = op->profiled_method();
2528 int bci = op->profiled_bci();
2529 ciMethod* callee = op->profiled_callee();
2530
2531 // Update counter for all call types
2532 ciMethodData* md = method->method_data_or_null();
2533 assert(md != nullptr, "Sanity");
2534 ciProfileData* data = md->bci_to_data(bci);
2535 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2536 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2537 Register mdo = op->mdo()->as_register();
2538 __ mov_metadata(mdo, md->constant_encoding());
2539 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2540 // Perform additional virtual call profiling for invokevirtual and
2541 // invokeinterface bytecodes
2542 if (op->should_profile_receiver_type()) {
2543 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2544 Register recv = op->recv()->as_register();
2545 assert_different_registers(mdo, recv);
2546 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2547 ciKlass* known_klass = op->known_holder();
2548 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2549 // We know the type that will be seen at this call site; we can
2550 // statically update the MethodData* rather than needing to do
2551 // dynamic tests on the receiver type
2552
2553 // NOTE: we should probably put a lock around this search to
2554 // avoid collisions by concurrent compilations
2555 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2556 uint i;
2557 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2558 ciKlass* receiver = vc_data->receiver(i);
2559 if (known_klass->equals(receiver)) {
2560 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2561 __ addptr(data_addr, DataLayout::counter_increment);
2562 return;
2563 }
2564 }
2565
2566 // Receiver type not found in profile data; select an empty slot
2567
2568 // Note that this is less efficient than it should be because it
2569 // always does a write to the receiver part of the
2570 // VirtualCallData rather than just the first time
2571 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2572 ciKlass* receiver = vc_data->receiver(i);
2573 if (receiver == nullptr) {
2574 __ mov_metadata(rscratch1, known_klass->constant_encoding());
2575 Address recv_addr =
2576 __ form_address(rscratch2, mdo,
2577 md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
2578 LogBytesPerWord);
2579 __ str(rscratch1, recv_addr);
2580 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2581 __ addptr(data_addr, DataLayout::counter_increment);
2582 return;
2583 }
2584 }
2585 } else {
2586 __ load_klass(recv, recv);
2587 Label update_done;
2588 type_profile_helper(mdo, md, data, recv, &update_done);
2589 // Receiver did not match any saved receiver and there is no empty row for it.
2590 // Increment total counter to indicate polymorphic case.
2591 __ addptr(counter_addr, DataLayout::counter_increment);
2592
2593 __ bind(update_done);
2594 }
2595 } else {
2596 // Static call
2597 __ addptr(counter_addr, DataLayout::counter_increment);
2598 }
2599 }
2600
2601
2602 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2603 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2604 }
2605
2606 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2607 assert(op->crc()->is_single_cpu(), "crc must be register");
2608 assert(op->val()->is_single_cpu(), "byte value must be register");
2609 assert(op->result_opr()->is_single_cpu(), "result must be register");
2610 Register crc = op->crc()->as_register();
2611 Register val = op->val()->as_register();
2612 Register res = op->result_opr()->as_register();
2613
2614 assert_different_registers(val, crc, res);
2615 uint64_t offset;
2616 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2617 __ add(res, res, offset);
2618
2619 __ mvnw(crc, crc); // ~crc
2620 __ update_byte_crc32(crc, val, res);
2621 __ mvnw(res, crc); // ~crc
2622 }
2623
2624 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2625 COMMENT("emit_profile_type {");
2626 Register obj = op->obj()->as_register();
2627 Register tmp = op->tmp()->as_pointer_register();
2628 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2629 ciKlass* exact_klass = op->exact_klass();
2630 intptr_t current_klass = op->current_klass();
2631 bool not_null = op->not_null();
2632 bool no_conflict = op->no_conflict();
2633
2634 Label update, next, none;
2635
2636 bool do_null = !not_null;
2637 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2638 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2639
2640 assert(do_null || do_update, "why are we here?");
2641 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2642 assert(mdo_addr.base() != rscratch1, "wrong register");
2643
2644 __ verify_oop(obj);
2645
2646 if (tmp != obj) {
2647 assert_different_registers(obj, tmp, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2648 __ mov(tmp, obj);
2649 } else {
2650 assert_different_registers(obj, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2651 }
2652 if (do_null) {
2653 __ cbnz(tmp, update);
2654 if (!TypeEntries::was_null_seen(current_klass)) {
2655 __ ldr(rscratch2, mdo_addr);
2656 __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2657 __ str(rscratch2, mdo_addr);
2658 }
2659 if (do_update) {
2660 #ifndef ASSERT
2661 __ b(next);
2662 }
2663 #else
2664 __ b(next);
2665 }
2666 } else {
2667 __ cbnz(tmp, update);
2668 __ stop("unexpected null obj");
2669 #endif
2670 }
2671
2672 __ bind(update);
2673
2674 if (do_update) {
2675 #ifdef ASSERT
2676 if (exact_klass != nullptr) {
2677 Label ok;
2678 __ load_klass(tmp, tmp);
2679 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2680 __ eor(rscratch1, tmp, rscratch1);
2681 __ cbz(rscratch1, ok);
2682 __ stop("exact klass and actual klass differ");
2683 __ bind(ok);
2684 }
2685 #endif
2686 if (!no_conflict) {
2687 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2688 if (exact_klass != nullptr) {
2689 __ mov_metadata(tmp, exact_klass->constant_encoding());
2690 } else {
2691 __ load_klass(tmp, tmp);
2692 }
2693
2694 __ ldr(rscratch2, mdo_addr);
2695 __ eor(tmp, tmp, rscratch2);
2696 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2697 // klass seen before, nothing to do. The unknown bit may have been
2698 // set already but no need to check.
2699 __ cbz(rscratch1, next);
2700
2701 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2702
2703 if (TypeEntries::is_type_none(current_klass)) {
2704 __ cbz(rscratch2, none);
2705 __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2706 __ br(Assembler::EQ, none);
2707 // There is a chance that the checks above
2708 // fail if another thread has just set the
2709 // profiling to this obj's klass
2710 __ dmb(Assembler::ISHLD);
2711 __ eor(tmp, tmp, rscratch2); // get back original value before XOR
2712 __ ldr(rscratch2, mdo_addr);
2713 __ eor(tmp, tmp, rscratch2);
2714 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2715 __ cbz(rscratch1, next);
2716 }
2717 } else {
2718 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2719 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2720
2721 __ ldr(tmp, mdo_addr);
2722 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2723 }
2724
2725 // different than before. Cannot keep accurate profile.
2726 __ ldr(rscratch2, mdo_addr);
2727 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2728 __ str(rscratch2, mdo_addr);
2729
2730 if (TypeEntries::is_type_none(current_klass)) {
2731 __ b(next);
2732
2733 __ bind(none);
2734 // first time here. Set profile type.
2735 __ str(tmp, mdo_addr);
2736 #ifdef ASSERT
2737 __ andr(tmp, tmp, TypeEntries::type_mask);
2738 __ verify_klass_ptr(tmp);
2739 #endif
2740 }
2741 } else {
2742 // There's a single possible klass at this profile point
2743 assert(exact_klass != nullptr, "should be");
2744 if (TypeEntries::is_type_none(current_klass)) {
2745 __ mov_metadata(tmp, exact_klass->constant_encoding());
2746 __ ldr(rscratch2, mdo_addr);
2747 __ eor(tmp, tmp, rscratch2);
2748 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2749 __ cbz(rscratch1, next);
2750 #ifdef ASSERT
2751 {
2752 Label ok;
2753 __ ldr(rscratch1, mdo_addr);
2754 __ cbz(rscratch1, ok);
2755 __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2756 __ br(Assembler::EQ, ok);
2757 // may have been set by another thread
2758 __ dmb(Assembler::ISHLD);
2759 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2760 __ ldr(rscratch2, mdo_addr);
2761 __ eor(rscratch2, rscratch1, rscratch2);
2762 __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2763 __ cbz(rscratch2, ok);
2764
2765 __ stop("unexpected profiling mismatch");
2766 __ bind(ok);
2767 }
2768 #endif
2769 // first time here. Set profile type.
2770 __ str(tmp, mdo_addr);
2771 #ifdef ASSERT
2772 __ andr(tmp, tmp, TypeEntries::type_mask);
2773 __ verify_klass_ptr(tmp);
2774 #endif
2775 } else {
2776 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2777 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2778
2779 __ ldr(tmp, mdo_addr);
2780 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2781
2782 __ orr(tmp, tmp, TypeEntries::type_unknown);
2783 __ str(tmp, mdo_addr);
2784 // FIXME: Write barrier needed here?
2785 }
2786 }
2787
2788 __ bind(next);
2789 }
2790 COMMENT("} emit_profile_type");
2791 }
2792
2793
2794 void LIR_Assembler::align_backward_branch_target() {
2795 }
2796
2797
2798 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2799 // tmp must be unused
2800 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2801
2802 if (left->is_single_cpu()) {
2803 assert(dest->is_single_cpu(), "expect single result reg");
2804 __ negw(dest->as_register(), left->as_register());
2805 } else if (left->is_double_cpu()) {
2806 assert(dest->is_double_cpu(), "expect double result reg");
2807 __ neg(dest->as_register_lo(), left->as_register_lo());
2808 } else if (left->is_single_fpu()) {
2809 assert(dest->is_single_fpu(), "expect single float result reg");
2810 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2811 } else {
2812 assert(left->is_double_fpu(), "expect double float operand reg");
2813 assert(dest->is_double_fpu(), "expect double float result reg");
2814 __ fnegd(dest->as_double_reg(), left->as_double_reg());
2815 }
2816 }
2817
2818
2819 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2820 if (patch_code != lir_patch_none) {
2821 deoptimize_trap(info);
2822 return;
2823 }
2824
2825 __ lea(dest->as_pointer_register(), as_Address(addr->as_address_ptr()));
2826 }
2827
2828
2829 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2830 assert(!tmp->is_valid(), "don't need temporary");
2831
2832 CodeBlob *cb = CodeCache::find_blob(dest);
2833 if (cb) {
2834 __ far_call(RuntimeAddress(dest));
2835 } else {
2836 __ mov(rscratch1, RuntimeAddress(dest));
2837 __ blr(rscratch1);
2838 }
2839
2840 if (info != nullptr) {
2841 add_call_info_here(info);
2842 }
2843 __ post_call_nop();
2844 }
2845
2846 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2847 if (dest->is_address() || src->is_address()) {
2848 move_op(src, dest, type, lir_patch_none, info, /*wide*/false);
2849 } else {
2850 ShouldNotReachHere();
2851 }
2852 }
2853
2854 #ifdef ASSERT
2855 // emit run-time assertion
2856 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2857 assert(op->code() == lir_assert, "must be");
2858
2859 if (op->in_opr1()->is_valid()) {
2860 assert(op->in_opr2()->is_valid(), "both operands must be valid");
2861 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2862 } else {
2863 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2864 assert(op->condition() == lir_cond_always, "no other conditions allowed");
2865 }
2866
2867 Label ok;
2868 if (op->condition() != lir_cond_always) {
2869 Assembler::Condition acond = Assembler::AL;
2870 switch (op->condition()) {
2871 case lir_cond_equal: acond = Assembler::EQ; break;
2872 case lir_cond_notEqual: acond = Assembler::NE; break;
2873 case lir_cond_less: acond = Assembler::LT; break;
2874 case lir_cond_lessEqual: acond = Assembler::LE; break;
2875 case lir_cond_greaterEqual: acond = Assembler::GE; break;
2876 case lir_cond_greater: acond = Assembler::GT; break;
2877 case lir_cond_belowEqual: acond = Assembler::LS; break;
2878 case lir_cond_aboveEqual: acond = Assembler::HS; break;
2879 default: ShouldNotReachHere();
2880 }
2881 __ br(acond, ok);
2882 }
2883 if (op->halt()) {
2884 const char* str = __ code_string(op->msg());
2885 __ stop(str);
2886 } else {
2887 breakpoint();
2888 }
2889 __ bind(ok);
2890 }
2891 #endif
2892
2893 #ifndef PRODUCT
2894 #define COMMENT(x) do { __ block_comment(x); } while (0)
2895 #else
2896 #define COMMENT(x)
2897 #endif
2898
2899 void LIR_Assembler::membar() {
2900 COMMENT("membar");
2901 __ membar(MacroAssembler::AnyAny);
2902 }
2903
2904 void LIR_Assembler::membar_acquire() {
2905 __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2906 }
2907
2908 void LIR_Assembler::membar_release() {
2909 __ membar(Assembler::LoadStore|Assembler::StoreStore);
2910 }
2911
2912 void LIR_Assembler::membar_loadload() {
2913 __ membar(Assembler::LoadLoad);
2914 }
2915
2916 void LIR_Assembler::membar_storestore() {
2917 __ membar(MacroAssembler::StoreStore);
2918 }
2919
2920 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2921
2922 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2923
2924 void LIR_Assembler::on_spin_wait() {
2925 __ spin_wait();
2926 }
2927
2928 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2929 __ mov(result_reg->as_register(), rthread);
2930 }
2931
2932
2933 void LIR_Assembler::peephole(LIR_List *lir) {
2934 #if 0
2935 if (tableswitch_count >= max_tableswitches)
2936 return;
2937
2938 /*
2939 This finite-state automaton recognizes sequences of compare-and-
2940 branch instructions. We will turn them into a tableswitch. You
2941 could argue that C1 really shouldn't be doing this sort of
2942 optimization, but without it the code is really horrible.
2943 */
2944
2945 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2946 int first_key, last_key = -2147483648;
2947 int next_key = 0;
2948 int start_insn = -1;
2949 int last_insn = -1;
2950 Register reg = noreg;
2951 LIR_Opr reg_opr;
2952 state = start_s;
2953
2954 LIR_OpList* inst = lir->instructions_list();
2955 for (int i = 0; i < inst->length(); i++) {
2956 LIR_Op* op = inst->at(i);
2957 switch (state) {
2958 case start_s:
2959 first_key = -1;
2960 start_insn = i;
2961 switch (op->code()) {
2962 case lir_cmp:
2963 LIR_Opr opr1 = op->as_Op2()->in_opr1();
2964 LIR_Opr opr2 = op->as_Op2()->in_opr2();
2965 if (opr1->is_cpu_register() && opr1->is_single_cpu()
2966 && opr2->is_constant()
2967 && opr2->type() == T_INT) {
2968 reg_opr = opr1;
2969 reg = opr1->as_register();
2970 first_key = opr2->as_constant_ptr()->as_jint();
2971 next_key = first_key + 1;
2972 state = cmp_s;
2973 goto next_state;
2974 }
2975 break;
2976 }
2977 break;
2978 case cmp_s:
2979 switch (op->code()) {
2980 case lir_branch:
2981 if (op->as_OpBranch()->cond() == lir_cond_equal) {
2982 state = beq_s;
2983 last_insn = i;
2984 goto next_state;
2985 }
2986 }
2987 state = start_s;
2988 break;
2989 case beq_s:
2990 switch (op->code()) {
2991 case lir_cmp: {
2992 LIR_Opr opr1 = op->as_Op2()->in_opr1();
2993 LIR_Opr opr2 = op->as_Op2()->in_opr2();
2994 if (opr1->is_cpu_register() && opr1->is_single_cpu()
2995 && opr1->as_register() == reg
2996 && opr2->is_constant()
2997 && opr2->type() == T_INT
2998 && opr2->as_constant_ptr()->as_jint() == next_key) {
2999 last_key = next_key;
3000 next_key++;
3001 state = cmp_s;
3002 goto next_state;
3003 }
3004 }
3005 }
3006 last_key = next_key;
3007 state = start_s;
3008 break;
3009 default:
3010 assert(false, "impossible state");
3011 }
3012 if (state == start_s) {
3013 if (first_key < last_key - 5L && reg != noreg) {
3014 {
3015 // printf("found run register %d starting at insn %d low value %d high value %d\n",
3016 // reg->encoding(),
3017 // start_insn, first_key, last_key);
3018 // for (int i = 0; i < inst->length(); i++) {
3019 // inst->at(i)->print();
3020 // tty->print("\n");
3021 // }
3022 // tty->print("\n");
3023 }
3024
3025 struct tableswitch *sw = &switches[tableswitch_count];
3026 sw->_insn_index = start_insn, sw->_first_key = first_key,
3027 sw->_last_key = last_key, sw->_reg = reg;
3028 inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3029 {
3030 // Insert the new table of branches
3031 int offset = last_insn;
3032 for (int n = first_key; n < last_key; n++) {
3033 inst->insert_before
3034 (last_insn + 1,
3035 new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3036 inst->at(offset)->as_OpBranch()->label()));
3037 offset -= 2, i++;
3038 }
3039 }
3040 // Delete all the old compare-and-branch instructions
3041 for (int n = first_key; n < last_key; n++) {
3042 inst->remove_at(start_insn);
3043 inst->remove_at(start_insn);
3044 }
3045 // Insert the tableswitch instruction
3046 inst->insert_before(start_insn,
3047 new LIR_Op2(lir_cmp, lir_cond_always,
3048 LIR_OprFact::intConst(tableswitch_count),
3049 reg_opr));
3050 inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3051 tableswitch_count++;
3052 }
3053 reg = noreg;
3054 last_key = -2147483648;
3055 }
3056 next_state:
3057 ;
3058 }
3059 #endif
3060 }
3061
3062 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3063 Address addr = as_Address(src->as_address_ptr());
3064 BasicType type = src->type();
3065 bool is_oop = is_reference_type(type);
3066
3067 void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3068 void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3069
3070 switch(type) {
3071 case T_INT:
3072 xchg = &MacroAssembler::atomic_xchgalw;
3073 add = &MacroAssembler::atomic_addalw;
3074 break;
3075 case T_LONG:
3076 xchg = &MacroAssembler::atomic_xchgal;
3077 add = &MacroAssembler::atomic_addal;
3078 break;
3079 case T_OBJECT:
3080 case T_ARRAY:
3081 if (UseCompressedOops) {
3082 xchg = &MacroAssembler::atomic_xchgalw;
3083 add = &MacroAssembler::atomic_addalw;
3084 } else {
3085 xchg = &MacroAssembler::atomic_xchgal;
3086 add = &MacroAssembler::atomic_addal;
3087 }
3088 break;
3089 default:
3090 ShouldNotReachHere();
3091 xchg = &MacroAssembler::atomic_xchgal;
3092 add = &MacroAssembler::atomic_addal; // unreachable
3093 }
3094
3095 switch (code) {
3096 case lir_xadd:
3097 {
3098 RegisterOrConstant inc;
3099 Register tmp = as_reg(tmp_op);
3100 Register dst = as_reg(dest);
3101 if (data->is_constant()) {
3102 inc = RegisterOrConstant(as_long(data));
3103 assert_different_registers(dst, addr.base(), tmp,
3104 rscratch1, rscratch2);
3105 } else {
3106 inc = RegisterOrConstant(as_reg(data));
3107 assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3108 rscratch1, rscratch2);
3109 }
3110 __ lea(tmp, addr);
3111 (_masm->*add)(dst, inc, tmp);
3112 break;
3113 }
3114 case lir_xchg:
3115 {
3116 Register tmp = tmp_op->as_register();
3117 Register obj = as_reg(data);
3118 Register dst = as_reg(dest);
3119 if (is_oop && UseCompressedOops) {
3120 __ encode_heap_oop(rscratch2, obj);
3121 obj = rscratch2;
3122 }
3123 assert_different_registers(obj, addr.base(), tmp, rscratch1);
3124 assert_different_registers(dst, addr.base(), tmp, rscratch1);
3125 __ lea(tmp, addr);
3126 (_masm->*xchg)(dst, obj, tmp);
3127 if (is_oop && UseCompressedOops) {
3128 __ decode_heap_oop(dst);
3129 }
3130 }
3131 break;
3132 default:
3133 ShouldNotReachHere();
3134 }
3135 if(!UseLSE) {
3136 __ membar(__ AnyAny);
3137 }
3138 }
3139
3140 #undef __