1 /*
2 * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.inline.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "c1/c1_ValueStack.hpp"
31 #include "ci/ciArrayKlass.hpp"
32 #include "ci/ciInstance.hpp"
33 #include "gc/shared/collectedHeap.hpp"
34 #include "memory/universe.hpp"
35 #include "nativeInst_arm.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_arm.inline.hpp"
42
43 #define __ _masm->
44
// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and is used with care in the other C1-specific
// files.
49
// Whether 'opr' qualifies as a "small" constant for special handling.
// Never called in the ARM port (guarded by ShouldNotCallThis).
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}
54
55
// Operand holding the receiver on entry: R0, the first argument
// register of the Java calling convention, typed as an oop.
LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}
60
// Operand holding the OSR buffer address on OSR entry: R0, typed as a
// raw pointer (not an oop).
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}
64
#ifndef PRODUCT
// Debug-only check that the frame's reserved outgoing-argument area is
// large enough for 'args_count' words (see store_parameter below).
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT
70
// Stores the 32-bit constant 'c' into the frame's reserved argument
// area at the given word offset from SP. Clobbers Rtemp.
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}
78
// Stores the address of metadata 'm' into the frame's reserved argument
// area at the given word offset from SP. Clobbers Rtemp.
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}
86
87 //--------------fpu register translations-----------------------
88
89
// Emits a breakpoint instruction (delegates to the macro assembler).
void LIR_Assembler::breakpoint() {
  __ breakpoint();
}
93
// Pushing a LIR operand on the stack is not implemented on ARM.
void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}
97
// Popping a LIR operand off the stack is not implemented on ARM.
void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}
101
102 //-------------------------------------------
// Converts a LIR_Address into an ARM Address operand.
// - Missing or constant index: the (scaled) index is folded into the
//   displacement; the compilation bails out if the result does not fit
//   the +/-4095 immediate-offset range of ARM load/store instructions.
// - Register index: a displacement cannot also be encoded, so disp must
//   be zero; the index is encoded as a shifted register (lsl for a
//   non-negative scale, lsr for a negative one).
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
  }
}
128
// Address of the high word of a two-word (long/double) location:
// the low-word address plus one word. Bails out if the bumped
// displacement would leave the ldr/str immediate range.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}
135
// Address of the low word of a two-word location; on little-endian ARM
// this is simply the base address itself.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
139
140
// On-stack-replacement entry point: records the OSR entry offset, builds
// the C1 frame, and copies each monitor's (lock, object) word pair from
// the OSR buffer (address arrives in R0, see osrBufferPointer) into this
// frame's monitor slots.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  // Byte offset of monitor pair 0 within the OSR buffer: max_locals words
  // followed by (number_of_locks - 1) two-word pairs before it
  // (i.e. monitors are laid out in reverse index order).
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    if (slot_offset >= 4096 - BytesPerWord) {
      // The second word's offset would not fit the ldr immediate range;
      // materialize the pair's address in R2 first.
      __ add_slow(R2, OSR_buf, slot_offset);
      __ ldr(R1, Address(R2, 0*BytesPerWord));
      __ ldr(R2, Address(R2, 1*BytesPerWord));
    } else {
      __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
      __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    }
    // First word of the pair is the lock, second is the locked object.
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}
166
167
// Emits the inline-cache check at the verified entry point and returns
// its code offset. Alignment is dictated by CodeEntryAlignment.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
171
// Class-initialization barrier is not implemented in the ARM port.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}
175
// Loads a yet-unknown oop constant into 'reg' via a patchable move.
// A placeholder (non_oop_word) is emitted now; the PatchingStub arranges
// for the real oop to be patched in at resolution time.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
185
186
// Loads a yet-unknown klass (metadata) constant into 'reg' via a
// patchable move; the real metadata pointer is patched in later.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
195
196
197 int LIR_Assembler::initial_frame_size_in_bytes() const {
198 // Subtracts two words to account for return address and link
199 return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
200 }
201
202
// Emits the exception handler entry into the stub section and returns
// its code offset, or -1 (after bailout) if the stub section is full.
// At entry the pending exception oop is expected in Rexception_obj.
int LIR_Assembler::emit_exception_handler() {
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  // Hand off to the runtime; it never returns here.
  __ call(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
223
224 // Emit the code to remove the frame from the stack in the exception
225 // unwind path.
// Emits the unwind handler: loads the pending exception from the thread,
// clears the thread's exception state, unlocks the method's monitor if
// the method is synchronized, removes the frame, and jumps to the shared
// unwind-exception runtime stub. Returns the handler's code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    // Monitor slot 0 holds a synchronized method's lock.
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, 0);
    __ unlock_object(R2, R1, R0, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(StubId::c1_unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
264
265
// Emits the deoptimization handler into the stub section and returns its
// code offset, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // Load the current pc into LR and save it for the deopt blob.
  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
284
285
// Emits the method return sequence: frame removal, return safepoint poll
// (a load from the polling page), then the actual return. 'result' is
// already in its ABI location; 'code_stub' is not used on ARM32.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}
292
// Emits an explicit safepoint poll (a load from the polling page) and
// returns the code offset where the poll sequence starts. 'tmp' is
// unused on ARM32; Rtemp is used instead.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}
303
304
305 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
306 if (from_reg != to_reg) {
307 __ mov(to_reg, from_reg);
308 }
309 }
310
// Materializes the constant 'src' into register operand 'dest'.
// Patching (patch_code != lir_patch_none) is only supported for
// T_OBJECT and T_METADATA, where unresolved constants go through a
// PatchingStub; all other types assert lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      // A long lives in a register pair; move each 32-bit half.
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}
367
// Stores the constant 'src' into the stack slot 'dest', staging the
// value through Rtemp.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG: // fall through
    case T_DOUBLE:
      // Store both 32-bit halves; skip rematerializing Rtemp for the high
      // half when it equals the low half (e.g. 0 or -1).
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}
404
// Stores a constant to memory. Only a null object constant is supported
// (asserted); it is implemented as a store of zero through Rtemp.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr),"cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != nullptr) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}
418
// Moves between register operands, covering all combinations of
// core and VFP registers for single- and double-word values.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      // Core register -> single-precision VFP register.
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      // Pair-aware move that copes with overlapping register pairs.
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      // Core register pair -> double-precision VFP register.
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      // Double-precision VFP register -> core register pair.
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
456
// Stores register operand 'src' into stack slot 'dest'.
// VFP store instructions have a smaller immediate-offset range than core
// stores, hence the extra bailout for FPU sources with large offsets.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
                 frame_map()->address_for_slot(dest->single_stack_ix()) :
                 frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little ending");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}
492
493
// Stores register operand 'src' to the memory location 'dest'.
// If patch_code != lir_patch_none the field offset is not yet known and
// PatchingStubs are emitted; two-word stores need two separate stubs
// (one per half), which is why 'patch' is re-created mid-sequence in the
// T_LONG case. If 'info' is given, the first emitted store doubles as an
// implicit null check.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();  // no compressed oops on 32-bit ARM
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:  // soft-float keeps floats in core registers
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:  // soft-float keeps doubles in core register pairs
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        // Register index: fold base+index into Rtemp, then store the two
        // halves relative to Rtemp.
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        // Store high half first so the base register is not clobbered
        // before the low-half store (base aliases from_lo).
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        // fsts cannot encode a register index; fold it into Rtemp first.
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        // fstd cannot encode a register index; fold it into Rtemp first.
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}
622
623
// Loads stack slot 'src' into register operand 'dest'.
// VFP load instructions have a smaller immediate-offset range than core
// loads, hence the extra bailout for FPU destinations with large offsets.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
                 frame_map()->address_for_slot(src->single_stack_ix()) :
                 frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little ending");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}
662
663
// Copies one stack slot to another, staging each word through Rtemp.
// Double-word values are copied one word at a time.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}
692
693
// Loads from the memory location 'src' into register operand 'dest'.
// Mirrors reg2mem: unresolved field offsets go through PatchingStubs
// (two stubs for two-word loads, one per half), and if 'info' is given a
// null-check debug record is attached before the load.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:  // soft-float keeps floats in core registers
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:  // soft-float keeps doubles in core register pairs
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        // Register index: fold base+index into Rtemp, then load the two
        // halves relative to Rtemp.
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        // Load high half first so the base register is not clobbered
        // before the low-half load (base aliases to_lo).
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        // flds cannot encode a register index; fold it into Rtemp first.
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        // fldd cannot encode a register index; fold it into Rtemp first.
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}
828
829
// Emits lir_idiv / lir_irem. A 32-bit division by a power-of-2 constant
// is strength-reduced inline to shifts; everything else is handed to the
// shared idiv_irem runtime stub (which defines the expected operand
// registers — see StubRoutines::Arm).
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      // Round towards zero: add sign bit before the arithmetic shift.
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}
862
863
// Emits a conditional or unconditional branch. Float branches first test
// the VFP status flags and branch to the unordered block on 'vs' (NaN),
// then fall through to the ordinary integer-condition branch.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
  assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert (op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    // Transfer VFP comparison flags to CPSR, dispatch unordered result.
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  // Map the LIR condition onto an ARM condition code.
  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;  // unsigned >=
    case lir_cond_belowEqual:   acond = ls; break;  // unsigned <=
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}
895
896
// Emits primitive-type conversions. int<->float/double conversions go
// through the scratch VFP register Stemp; f2i/d2i use the
// round-towards-zero variants (ftosizs/ftosizd) per Java semantics.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      // Low word is the int; high word is its sign extension.
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      // char is unsigned 16-bit.
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}
944
945
// Emits code for object allocation (LIR_OpAllocObj): an optional class
// initialization check followed by the inline fast-path allocation, with
// the stub entry as the slow path. Control rejoins at the stub's
// continuation label.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // The klass must be fully initialized before instances may be
    // allocated; otherwise branch to the slow path, which handles
    // triggering initialization.
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    // Acquire-style barrier: keep the init_state load ordered before any
    // subsequent accesses that assume the class is initialized.
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  // Fast-path allocation; falls into the stub entry on failure.
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
965
966 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
967 if (UseSlowPath ||
968 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
969 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
970 __ b(*op->stub()->entry());
971 } else {
972 __ allocate_array(op->obj()->as_register(),
973 op->len()->as_register(),
974 op->tmp1()->as_register(),
975 op->tmp2()->as_register(),
976 op->tmp3()->as_register(),
977 arrayOopDesc::base_offset_in_bytes(op->type()),
978 type2aelembytes(op->type()),
979 op->klass()->as_register(),
980 *op->stub()->entry());
981 }
982 __ bind(*op->stub()->continuation());
983 }
984
// Emits code that updates the receiver-type rows of the profile entry
// `data` within the MDO pointed to by `mdo` (already biased by
// `mdo_offset_bias`): increments the counter of the row matching the
// receiver klass in `recv`, or claims the first empty row for it.
// Branches to `update_done` once a row has been updated; falls through
// when every row is occupied by other types. Clobbers `tmp1`.
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    // Match: bump the per-receiver counter and finish.
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    // A null receiver slot means the row is free.
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}
1023
1024 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
1025 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
1026 md = method->method_data_or_null();
1027 assert(md != nullptr, "Sanity");
1028 data = md->bci_to_data(bci);
1029 assert(data != nullptr, "need data for checkcast");
1030 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1031 if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
1032 // The offset is large so bias the mdo by the base of the slot so
1033 // that the ldr can use an immediate offset to reference the slots of the data
1034 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
1035 }
1036 }
1037
// Profiling prologue shared by checkcast/instanceof/store-check: sets up
// MDO access and, when the incoming object is null, records the
// null_seen flag in the profile and branches to `obj_is_null`.
// Falls through (at `not_null`) when the object is non-null.
// Clobbers `mdo` and `data_val` on the null path.
// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != nullptr, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  // Z flag was set by the caller's null test: skip the null-path code
  // when the object is non-null.
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    // Bias mdo so the flags slot is reachable with an immediate offset.
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  // Set the null_seen bit in the profile flags byte.
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}
1059
// Profiling epilogue shared by checkcast/instanceof/store-check: binds
// the `profile_cast_success` / `profile_cast_failure` labels supplied by
// the caller. On success it records the receiver type of `value` in the
// MDO and branches to `success`; on failure it decrements the profile
// counter and branches to `failure`. Clobbers `mdo`, `recv` and `tmp1`.
void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    // Bias mdo so profile slots are reachable with immediate offsets.
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  // Back out the increment that will be (or was) attributed optimistically.
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}
1087
1088 // Sets `res` to true, if `cond` holds.
1089 static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
1090 __ mov(res, 1, cond);
1091 }
1092
1093
// Emits code for the LIR type-check operations: lir_store_check (array
// element store check), lir_checkcast and lir_instanceof. Each variant
// performs the inline fast subtype checks (direct klass equality and the
// super_check_offset probe) and calls the slow_subtype_check runtime
// stub for the remaining cases. When profiling is enabled, the observed
// receiver type is recorded in the MDO via the typecheck_profile
// helpers.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        // helper1 expects the Z flag to reflect a null test of `value`
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        // storing null is always allowed
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      // k_RInfo = element klass of the array; klass_RInfo = klass of value
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          // avoid clobbering `value` while it is still needed for profiling
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;


      // movs sets the Z flag for the null check consumed below / by helper1
      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        // a null object trivially passes checkcast
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        // re-establish the Z flag (patching may have clobbered it)
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      // load klass only when obj is non-null (ne from the movs above)
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        // exact klass match is sufficient (leaf classes)
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          // primary supertype: one load and compare decides the check
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          // secondary supertype: try the cache, then the runtime stub
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        // klass not loaded at compile time: use its super_check_offset
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      // movs sets the Z flag for the null check consumed below / by helper1
      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        // null is not an instance of anything; res already holds 0 here
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        // keep `res` (the oop) visible to GC across the patching site
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        // default result is "false"; set_instanceof_result flips it to 1
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        // exact klass match is sufficient (leaf classes)
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          // primary supertype: one load and compare decides the check
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          // secondary supertype: try the cache, then the runtime stub
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            // stub returns the boolean result directly in R0
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        // klass not loaded at compile time: use its super_check_offset
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          // res can serve as scratch: it is only set to 1 on the eq path
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        // helper2 branches to done_ok/done_failure; translate those into
        // the 1/0 instanceof result.
        Label done_ok, done_failure;
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
1365
1366
// Emits an atomic compare-and-swap for 32-bit values (int/oop) or
// 64-bit longs, bracketed by memory barriers. The result register is
// set to 1 on success and 0 on failure.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // if (*addr == cmpval) {
  //   *addr = newval;
  //   dest = 1;
  // } else {
  //   dest = 0;
  // }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  // CAS works on a plain base register: no displacement or index allowed.
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_Opr::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    // atomic_cas_bool leaves the flags set: materialize the 0/1 result.
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    // ldrexd/strexd require even/odd-adjacent register pairs.
    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}
1412
1413
// Emits a conditional move: result = condition ? opr1 : opr2, using
// ARM predicated instructions (no branches). The move of opr1 is
// emitted under `acond` and the move of opr2 under the negated
// condition `ncond`; the loop below runs the same emission logic once
// for each operand.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    // Map the LIR condition onto an ARM condition and its complement.
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) {                         // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      // Constant operand: materialize it under the current condition.
      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}
1497
#ifdef ASSERT
// Debug-only helper: size in bytes of the register storage backing an
// operand of the given LIR type (used by assertions in arith_op).
static int reg_size(LIR_Opr op) {
  const BasicType t = op->type();
  if (t == T_INT || t == T_FLOAT) {
    return BytesPerInt;
  }
  if (t == T_LONG || t == T_DOUBLE) {
    return BytesPerLong;
  }
  if (t == T_OBJECT || t == T_ARRAY || t == T_METADATA) {
    return BytesPerWord;
  }
  // T_ADDRESS, T_ILLEGAL and any other type are unexpected here.
  ShouldNotReachHere();
  return -1;
}
#endif
1514
// Emits an arithmetic operation (add/sub/mul/div) for the given operand
// kinds: shifted-register address forms, 32-bit integer, 64-bit
// integer pairs, and single/double VFP registers. `info` must be null
// on this path (no implicit exceptions).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
  assert(info == nullptr, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    // dest = lreg +/- (index << scale), in a single instruction
    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    // reverse-subtract: res = (index << scale) - rreg
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      // constant must fit an ARM rotated immediate, else bail out of
      // the compilation
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      // writing res_lo first must not clobber an input high word still
      // needed for the high-word operation; detour through Rtemp if so
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    // copy back from Rtemp if the low word was redirected above
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(), "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(), "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
1652
1653
1654 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1655 switch (code) {
1656 case lir_abs:
1657 __ abs_double(dest->as_double_reg(), value->as_double_reg());
1658 break;
1659 case lir_sqrt:
1660 __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
1661 break;
1662 default:
1663 ShouldNotReachHere();
1664 }
1665 }
1666
1667
// Emits a bitwise logic operation (and/or/xor) for 32-bit registers,
// 64-bit register pairs, and the constant patterns produced by
// do_ClassIDIntrinsic(). Bails out of the compilation for 64-bit
// constants that cannot be encoded inline.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
        case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const uint c = (uint)right->as_constant_ptr()->as_jint();
      // constant must be encodable as an ARM rotated immediate
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, c); break;
        case lir_logic_or:  __ orr_32(res, lreg, c); break;
        case lir_logic_xor: __ eor_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(dest->is_double_cpu(), "should be");
    Register res_lo = dest->as_register_lo();

    assert (dest->type() == T_LONG, "unexpected result type");
    assert (left->type() == T_LONG, "unexpected left type");
    assert (right->type() == T_LONG, "unexpected right type");

    const Register res_hi = dest->as_register_hi();
    const Register lreg_lo = left->as_register_lo();
    const Register lreg_hi = left->as_register_hi();

    if (right->is_register()) {
      const Register rreg_lo = right->as_register_lo();
      const Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
      }
      switch (code) {
        case lir_logic_and:
          __ andr(res_lo, lreg_lo, rreg_lo);
          __ andr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_or:
          __ orr(res_lo, lreg_lo, rreg_lo);
          __ orr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_xor:
          __ eor(res_lo, lreg_lo, rreg_lo);
          __ eor(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
      // copy back from Rtemp if the low word was redirected above
      move_regs(res_lo, dest->as_register_lo());
    } else {
      assert(right->is_constant(), "must be");
      const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
      const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
      // Case for logic_or from do_ClassIDIntrinsic()
      if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
        switch (code) {
          case lir_logic_and:
            // high constant word is zero, so the high result is zero too
            __ andr(res_lo, lreg_lo, c_lo);
            __ mov(res_hi, 0);
            break;
          case lir_logic_or:
            __ orr(res_lo, lreg_lo, c_lo);
            break;
          case lir_logic_xor:
            __ eor(res_lo, lreg_lo, c_lo);
            break;
        default:
          ShouldNotReachHere();
        }
      } else if (code == lir_logic_and &&
                 c_hi == -1 &&
                 (AsmOperand::is_rotated_imm(c_lo) ||
                  AsmOperand::is_rotated_imm(~c_lo))) {
        // Another case which handles logic_and from do_ClassIDIntrinsic()
        if (AsmOperand::is_rotated_imm(c_lo)) {
          __ andr(res_lo, lreg_lo, c_lo);
        } else {
          // encode via bit-clear when only the complement fits the immediate
          __ bic(res_lo, lreg_lo, ~c_lo);
        }
        if (res_hi != lreg_hi) {
          // and with all-ones high word: just pass the high word through
          __ mov(res_hi, lreg_hi);
        }
      } else {
        BAILOUT("64 bit constant cannot be inlined");
      }
    }
  }
}
1772
1773
1774
// Emits a comparison of opr1 against opr2, leaving the result in the
// condition flags (CPSR for integer compares, FPSCR for FP compares — FP
// callers must transfer flags with fmstat before branching).
// Handled combinations:
//   - 32-bit register vs. T_INT constant, null T_OBJECT, or null T_METADATA
//   - 32-bit register vs. 32-bit register
//   - 64-bit register pair vs. constant 0 (equality only) or register pair
//   - float/double register vs. constant 0.0 or FP register
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT: {
          const jint c = opr2->as_constant_ptr()->as_jint();
          if (Assembler::is_arith_imm_in_range(c)) {
            __ cmp_32(opr1->as_register(), c);
          } else if (Assembler::is_arith_imm_in_range(-c)) {
            // c itself does not encode as an immediate but -c does:
            // compare-negative sets the same flags via an add.
            __ cmn_32(opr1->as_register(), -c);
          } else {
            // This can happen when compiling lookupswitch
            __ mov_slow(Rtemp, c);
            __ cmp_32(opr1->as_register(), Rtemp);
          }
          break;
        }
        case T_OBJECT:
          assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        case T_METADATA:
          assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
          assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        default:
          ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatibe type");
        __ cmpoop(opr1->as_register(), opr2->as_register());
      } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatibe type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatibe type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      // Z is set iff both halves are zero, i.e. the 64-bit value is zero.
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        // Equality: compare high words, then low words only if highs matched.
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        // Ordering: 64-bit subtract with borrow, result discarded; the final
        // flags reflect the signed comparison of the full 64-bit values.
        __ subs(Rtemp, xlo, ylo);
        __ sbcs(Rtemp, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}
1855
// Materializes a three-way comparison into an integer register:
// -1 (less), 0 (equal), 1 (greater).
// lir_cmp_fd2i / lir_ucmp_fd2i compare floats or doubles and differ only in
// how an unordered result (NaN operand) is ranked; lir_cmp_l2i compares longs.
// Note: mvn(res, 0) writes ~0 == -1.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    // Copy the FP status flags into the CPSR so integer conditions apply.
    __ fmstat();
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else { // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    // Equal overrides either of the above.
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    // Signed compare of the high words decides unless they are equal.
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    // High words equal: unsigned compare of the low words.
    // If the low words are also equal, the subs result itself is 0.
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}
1888
1889
// Call-site alignment hook; no alignment is required on this platform.
void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}
1893
1894
// Emits a patchable direct Java call and records the debug/oop-map info at
// the return address. The return address must immediately follow the call
// instruction (no embedded constants), which the assert verifies.
void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}
1900
1901
// Emits an inline-cache call: Ricklass is pre-loaded with the non-oop
// placeholder word that marks the cache as unresolved, then the call is made.
// Depending on movw/movt availability and code-cache reachability this uses
// either a direct bl, or a manual LR setup (adr) followed by a direct or
// indirect branch with inlined literals.
void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer so
  // mov_metadata can't be used
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    // Build the 32-bit placeholder directly in two halves.
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // No movw/movt, must be load a pc relative value but no
    // relocation so no metadata table to load from.
    // Use a b instruction rather than a bl, inline constant after the
    // branch, use a PC relative ldr to load the constant, arrange for
    // the call to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    // b/indirect_jump do not link, so set up the return address by hand.
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
    if (!use_movw) {
      __ bind_literal(value);
    }
    __ bind(call_return);
  }
  add_call_info(code_offset(), op->info());
}
1942
// Emits the static call stub: a metadata load into Rmethod followed by a
// jump, both patchable later by CompiledDirectCall. The exact instruction
// shape matters because NativeMovConstReg/NativeJump parse it at patch time.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    BAILOUT("static call stub overflow");
  }

  DEBUG_ONLY(int offset = code_offset();)

  InlinedMetadata metadata_literal(nullptr);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // If not a single instruction, NativeMovConstReg::next_instruction_address()
  // must jump over the whole following ldr_literal.
  // (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
  address ldr_site = __ pc();
#endif
  __ ldr_literal(Rmethod, metadata_literal);
  assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  if (near_range) {
    address branch_site = __ pc();
    __ b(branch_site); // b to self maps to special NativeJump -1 destination
  } else {
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(code_offset() - offset <= call_stub_size(), "overflow");
  __ end_a_stub();
}
1978
// Emits an explicit throw: records the exception oop in the debug info and
// calls the C1 exception-handler stub. Rexception_pc is loaded with the
// return address so the handler knows the throwing pc.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  // Pick the FPU-saving variant only when the method actually has FPU code.
  StubId handle_id = compilation()->has_fpu_code() ?
                StubId::c1_handle_exception_id :
                StubId::c1_handle_exception_nofpu_id;
  Label return_address;
  __ adr(Rexception_pc, return_address);
  __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
  __ bind(return_address);
  add_call_info_here(info); // for exception handler
}
1993
// Branches to the method's unwind handler with the exception oop already in
// the expected register (Rexception_obj).
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  __ b(_unwind_handler_entry);
}
1998
// Emits a shift (shl/shr/ushr) of a 32- or 64-bit value by a register count.
// The count is masked to 31 (int) or 63 (long), matching Java semantics.
// For the 64-bit case, operand/result register overlaps are resolved by
// redirecting one register to Rtemp and copying back afterwards.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
    __ andr(Rtemp, count->as_register(), 31);
    __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp));
  } else if (dest->is_double_cpu()) {
    Register dest_lo = dest->as_register_lo();
    Register dest_hi = dest->as_register_hi();
    Register src_lo  = left->as_register_lo();
    Register src_hi  = left->as_register_hi();
    Register Rcount  = count->as_register();
    // Resolve possible register conflicts
    if (shift == lsl && dest_hi == src_lo) {
      dest_hi = Rtemp;
    } else if (shift != lsl && dest_lo == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == src_lo && dest_hi == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == Rcount || dest_hi == Rcount) {
      Rcount = Rtemp;
    }
    // Mask from the original count register; Rcount may have been redirected.
    __ andr(Rcount, count->as_register(), 63);
    __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount);
    move_regs(dest_lo, dest->as_register_lo());
    move_regs(dest_hi, dest->as_register_hi());
  } else {
    ShouldNotReachHere();
  }
}
2035
2036
// Emits a shift (shl/shr/ushr) of a 32- or 64-bit value by a constant count.
// The count is masked to 31 (int) or 63 (long); a zero count degenerates to
// a plain register (pair) move.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
    count &= 31;
    if (count != 0) {
      __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count));
    } else {
      move_regs(left->as_register(), dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    count &= 63;
    if (count != 0) {
      Register dest_lo = dest->as_register_lo();
      Register dest_hi = dest->as_register_hi();
      Register src_lo  = left->as_register_lo();
      Register src_hi  = left->as_register_hi();
      // Resolve possible register conflicts
      if (shift == lsl && dest_hi == src_lo) {
        dest_hi = Rtemp;
      } else if (shift != lsl && dest_lo == src_hi) {
        dest_lo = Rtemp;
      }
      __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count);
      move_regs(dest_lo, dest->as_register_lo());
      move_regs(dest_hi, dest->as_register_hi());
    } else {
      // Zero shift: just move the pair.
      __ long_move(dest->as_register_lo(), dest->as_register_hi(),
                   left->as_register_lo(), left->as_register_hi());
    }
  } else {
    ShouldNotReachHere();
  }
}
2077
2078
// Saves 4 given registers in reserved argument area.
// The reserved area at SP must hold at least 4 words (verified); the store
// does not adjust SP.
void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  verify_reserved_argument_area_size(4);
  __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
}
2084
// Restores 4 given registers from reserved argument area.
// Counterpart of save_in_reserved_area(); SP is left unchanged (no_writeback).
void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
}
2089
2090
2091 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2092 ciArrayKlass* default_type = op->expected_type();
2093 Register src = op->src()->as_register();
2094 Register src_pos = op->src_pos()->as_register();
2095 Register dst = op->dst()->as_register();
2096 Register dst_pos = op->dst_pos()->as_register();
2097 Register length = op->length()->as_register();
2098 Register tmp = op->tmp()->as_register();
2099 Register tmp2 = Rtemp;
2100
2101 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");
2102
2103 CodeStub* stub = op->stub();
2104
2105 int flags = op->flags();
2106 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2107 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2108
2109 // If we don't know anything or it's an object array, just go through the generic arraycopy
2110 if (default_type == nullptr) {
2111
2112 // save arguments, because they will be killed by a runtime call
2113 save_in_reserved_area(R0, R1, R2, R3);
2114
2115 // pass length argument on SP[0]
2116 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment
2117
2118 address copyfunc_addr = StubRoutines::generic_arraycopy();
2119 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2120 #ifndef PRODUCT
2121 if (PrintC1Statistics) {
2122 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
2123 }
2124 #endif // !PRODUCT
2125 // the stub is in the code cache so close enough
2126 __ call(copyfunc_addr, relocInfo::runtime_call_type);
2127
2128 __ add(SP, SP, 2*wordSize);
2129
2130 __ cbz_32(R0, *stub->continuation());
2131
2132 __ mvn_32(tmp, R0);
2133 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
2134 __ sub_32(length, length, tmp);
2135 __ add_32(src_pos, src_pos, tmp);
2136 __ add_32(dst_pos, dst_pos, tmp);
2137
2138 __ b(*stub->entry());
2139
2140 __ bind(*stub->continuation());
2141 return;
2142 }
2143
2144 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(),
2145 "must be true at this point");
2146 int elem_size = type2aelembytes(basic_type);
2147 int shift = exact_log2(elem_size);
2148
2149 // Check for null
2150 if (flags & LIR_OpArrayCopy::src_null_check) {
2151 if (flags & LIR_OpArrayCopy::dst_null_check) {
2152 __ cmp(src, 0);
2153 __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed
2154 __ b(*stub->entry(), eq);
2155 } else {
2156 __ cbz(src, *stub->entry());
2157 }
2158 } else if (flags & LIR_OpArrayCopy::dst_null_check) {
2159 __ cbz(dst, *stub->entry());
2160 }
2161
2162 // If the compiler was not able to prove that exact type of the source or the destination
2163 // of the arraycopy is an array type, check at runtime if the source or the destination is
2164 // an instance type.
2165 if (flags & LIR_OpArrayCopy::type_check) {
2166 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2167 __ load_klass(tmp, dst);
2168 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2169 __ mov_slow(tmp, Klass::_lh_neutral_value);
2170 __ cmp_32(tmp2, tmp);
2171 __ b(*stub->entry(), ge);
2172 }
2173
2174 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2175 __ load_klass(tmp, src);
2176 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2177 __ mov_slow(tmp, Klass::_lh_neutral_value);
2178 __ cmp_32(tmp2, tmp);
2179 __ b(*stub->entry(), ge);
2180 }
2181 }
2182
2183 // Check if negative
2184 const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
2185 LIR_OpArrayCopy::dst_pos_positive_check |
2186 LIR_OpArrayCopy::length_positive_check;
2187 switch (flags & all_positive_checks) {
2188 case LIR_OpArrayCopy::src_pos_positive_check:
2189 __ branch_if_negative_32(src_pos, *stub->entry());
2190 break;
2191 case LIR_OpArrayCopy::dst_pos_positive_check:
2192 __ branch_if_negative_32(dst_pos, *stub->entry());
2193 break;
2194 case LIR_OpArrayCopy::length_positive_check:
2195 __ branch_if_negative_32(length, *stub->entry());
2196 break;
2197 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check:
2198 __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry());
2199 break;
2200 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2201 __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry());
2202 break;
2203 case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2204 __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry());
2205 break;
2206 case all_positive_checks:
2207 __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry());
2208 break;
2209 default:
2210 assert((flags & all_positive_checks) == 0, "the last option");
2211 }
2212
2213 // Range checks
2214 if (flags & LIR_OpArrayCopy::src_range_check) {
2215 __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes()));
2216 __ add_32(tmp, src_pos, length);
2217 __ cmp_32(tmp, tmp2);
2218 __ b(*stub->entry(), hi);
2219 }
2220 if (flags & LIR_OpArrayCopy::dst_range_check) {
2221 __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2222 __ add_32(tmp, dst_pos, length);
2223 __ cmp_32(tmp, tmp2);
2224 __ b(*stub->entry(), hi);
2225 }
2226
2227 // Check if src and dst are of the same type
2228 if (flags & LIR_OpArrayCopy::type_check) {
2229 // We don't know the array types are compatible
2230 if (basic_type != T_OBJECT) {
2231 // Simple test for basic type arrays
2232 __ load_klass(tmp, src);
2233 __ load_klass(tmp2, dst);
2234 __ cmp(tmp, tmp2);
2235 __ b(*stub->entry(), ne);
2236 } else {
2237 // For object arrays, if src is a sub class of dst then we can
2238 // safely do the copy.
2239 Label cont, slow;
2240
2241 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2242
2243 __ load_klass(tmp, src);
2244 __ load_klass(tmp2, dst);
2245
2246 // We are at a call so all live registers are saved before we
2247 // get here
2248 assert_different_registers(tmp, tmp2, R6, altFP_7_11);
2249
2250 __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);
2251
2252 __ mov(R6, R0);
2253 __ mov(altFP_7_11, R1);
2254 __ mov(R0, tmp);
2255 __ mov(R1, tmp2);
2256 __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
2257 __ cmp_32(R0, 0);
2258 __ mov(R0, R6);
2259 __ mov(R1, altFP_7_11);
2260
2261 if (copyfunc_addr != nullptr) { // use stub if available
2262 // src is not a sub class of dst so we have to do a
2263 // per-element check.
2264
2265 __ b(cont, ne);
2266
2267 __ bind(slow);
2268
2269 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2270 if ((flags & mask) != mask) {
2271 // Check that at least both of them object arrays.
2272 assert(flags & mask, "one of the two should be known to be an object array");
2273
2274 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2275 __ load_klass(tmp, src);
2276 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2277 __ load_klass(tmp, dst);
2278 }
2279 int lh_offset = in_bytes(Klass::layout_helper_offset());
2280
2281 __ ldr_u32(tmp2, Address(tmp, lh_offset));
2282
2283 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2284 __ mov_slow(tmp, objArray_lh);
2285 __ cmp_32(tmp, tmp2);
2286 __ b(*stub->entry(), ne);
2287 }
2288
2289 save_in_reserved_area(R0, R1, R2, R3);
2290
2291 Register src_ptr = R0;
2292 Register dst_ptr = R1;
2293 Register len = R2;
2294 Register chk_off = R3;
2295 Register super_k = tmp;
2296
2297 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2298 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
2299
2300 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2301 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
2302 __ load_klass(tmp, dst);
2303
2304 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2305 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2306
2307 __ ldr(super_k, Address(tmp, ek_offset));
2308
2309 __ mov(len, length);
2310 __ ldr_u32(chk_off, Address(super_k, sco_offset));
2311 __ push(super_k);
2312
2313 __ call(copyfunc_addr, relocInfo::runtime_call_type);
2314
2315 #ifndef PRODUCT
2316 if (PrintC1Statistics) {
2317 Label failed;
2318 __ cbnz_32(R0, failed);
2319 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
2320 __ bind(failed);
2321 }
2322 #endif // PRODUCT
2323
2324 __ add(SP, SP, wordSize); // Drop super_k argument
2325
2326 __ cbz_32(R0, *stub->continuation());
2327 __ mvn_32(tmp, R0);
2328
2329 // load saved arguments in slow case only
2330 restore_from_reserved_area(R0, R1, R2, R3);
2331
2332 __ sub_32(length, length, tmp);
2333 __ add_32(src_pos, src_pos, tmp);
2334 __ add_32(dst_pos, dst_pos, tmp);
2335
2336 #ifndef PRODUCT
2337 if (PrintC1Statistics) {
2338 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
2339 }
2340 #endif
2341
2342 __ b(*stub->entry());
2343
2344 __ bind(cont);
2345 } else {
2346 __ b(*stub->entry(), eq);
2347 __ bind(cont);
2348 }
2349 }
2350 }
2351
2352 #ifndef PRODUCT
2353 if (PrintC1Statistics) {
2354 address counter = Runtime1::arraycopy_count_address(basic_type);
2355 __ inc_counter(counter, tmp, tmp2);
2356 }
2357 #endif // !PRODUCT
2358
2359 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2360 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2361 const char *name;
2362 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2363
2364 Register src_ptr = R0;
2365 Register dst_ptr = R1;
2366 Register len = R2;
2367
2368 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2369 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
2370
2371 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2372 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
2373
2374 __ mov(len, length);
2375
2376 __ call(entry, relocInfo::runtime_call_type);
2377
2378 __ bind(*stub->continuation());
2379 }
2380
#ifdef ASSERT
// emit run-time assertion
// Debug-build only: emits a check of op->condition() over the flags set by
// the preceding comp_op; on failure either stops the VM with op->msg() or
// emits a breakpoint, per op->halt().
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Skip the failure path when the asserted condition holds.
    AsmCondition acond = al;
    switch (op->condition()) {
      case lir_cond_equal:        acond = eq; break;
      case lir_cond_notEqual:     acond = ne; break;
      case lir_cond_less:         acond = lt; break;
      case lir_cond_lessEqual:    acond = le; break;
      case lir_cond_greaterEqual: acond = ge; break;
      case lir_cond_greater:      acond = gt; break;
      case lir_cond_aboveEqual:   acond = hs; break;
      case lir_cond_belowEqual:   acond = ls; break;
      default:                    ShouldNotReachHere();
    }
    __ b(ok, acond);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif // ASSERT
2419
// CRC32 intrinsic is not supported here; reaching this op is a compiler bug.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}
2423
// Emits monitor enter/exit. The fast path is inlined (lock_object /
// unlock_object); contention falls through to the slow-path stub. For
// lir_lock the implicit null check performed inside lock_object is recorded
// for deopt when debug info is present.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_pointer_register();
  Register hdr = op->hdr_opr()->as_pointer_register();
  Register lock = op->lock_opr()->as_pointer_register();

  if (op->code() == lir_lock) {
    int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}
2441
// Loads the klass pointer from an object's header. The load itself acts as
// the implicit null check when debug info is attached.
void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }
  __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}
2452
// Emits profiling code for a call site: bumps the MDO call counter and, for
// virtual/interface calls, records the receiver type in the MDO's receiver
// rows. Large MDO slot offsets are handled by biasing the mdo base register
// so ldr/str immediates stay in range.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  assert(op->tmp1()->is_register(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_pointer_register();
  assert_different_registers(mdo, tmp1);
  __ mov_metadata(mdo, md->constant_encoding());
  int mdo_offset_bias = 0;
  int max_offset = 4096;
  if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // First pass: bump the counter of an existing row for this klass.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ldr(tmp1, data_addr);
          __ add(tmp1, tmp1, DataLayout::counter_increment);
          __ str(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == nullptr) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          __ mov_metadata(tmp1, known_klass->constant_encoding());
          __ str(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ldr(tmp1, data_addr);
          __ add(tmp1, tmp1, DataLayout::counter_increment);
          __ str(tmp1, data_addr);
          return;
        }
      }
    } else {
      // Receiver type unknown at compile time: test dynamically.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ldr(tmp1, counter_addr);
      __ add(tmp1, tmp1, DataLayout::counter_increment);
      __ str(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ldr(tmp1, counter_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, counter_addr);
  }
}
2549
// Speculative type profiling is not supported on this platform.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  fatal("Type profiling not implemented on this platform");
}
2553
// Inline-type (value class) profiling is not implemented on this platform.
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Unimplemented();
}
2557
// Materializes the stack address of the given monitor slot into dst
// (frame base register + displacement).
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
}
2562
2563
// Aligns loop-head (backward-branch) targets.
void LIR_Assembler::align_backward_branch_target() {
  // Some ARM processors do better with 8-byte branch target alignment
  __ align(8);
}
2568
2569
// Emits arithmetic negation for int, long, float or double operands.
// For longs the negation is done as (0 - src) with borrow: rsbs low then
// rsc high; if dest_lo aliases src_hi, the low result is redirected through
// Rtemp so src_hi is still intact when rsc reads it.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert (dest->type() == T_INT, "unexpected result type");
    assert (left->type() == T_INT, "unexpected left type");
    __ neg_32(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    Register dest_lo = dest->as_register_lo();
    Register dest_hi = dest->as_register_hi();
    Register src_lo = left->as_register_lo();
    Register src_hi = left->as_register_hi();
    if (dest_lo == src_hi) {
      dest_lo = Rtemp;
    }
    __ rsbs(dest_lo, src_lo, 0);
    __ rsc(dest_hi, src_hi, 0);
    move_regs(dest_lo, dest->as_register_lo());
  } else if (left->is_single_fpu()) {
    __ neg_float(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ neg_double(dest->as_double_reg(), left->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}
2597
2598
// Computes the effective address of addr_opr into dest (load-effective-
// address). Supports base+displacement (displacement must encode as an
// arithmetic immediate) or base+scaled-index with zero displacement.
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  if (addr->index()->is_illegal()) {
    jint c = addr->disp();
    if (!Assembler::is_arith_imm_in_range(c)) {
      BAILOUT("illegal arithmetic operand");
    }
    __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
  } else {
    assert(addr->disp() == 0, "cannot handle otherwise");
    __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
           AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
  }
}
2614
2615
// Emits a direct runtime call; arguments are assumed to be already placed
// per the calling convention. Debug info is recorded when supplied.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(dest);
  if (info != nullptr) {
    add_call_info_here(info);
  }
}
2623
2624
// Moves a 64-bit value between a register pair and memory using a single
// ldmia/stmia instruction (presumably for the single-access guarantee
// volatile longs need — TODO confirm against the platform memory model).
// ldm/stm store registers in ascending numerical order, so when the pair is
// not already ordered (lo < hi) the high half is routed through Rtemp,
// which is asserted to be numerically above all allocatable registers.
// The first memory access doubles as the implicit null check.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert((src->is_double_cpu() && dest->is_address()) ||
         (src->is_address() && dest->is_double_cpu()),
         "Simple move_op is called for all other cases");

  int null_check_offset;
  if (dest->is_address()) {
    // Store
    const LIR_Address* addr = dest->as_address_ptr();
    const Register src_lo = src->as_register_lo();
    const Register src_hi = src->as_register_hi();
    assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

    if (src_lo < src_hi) {
      null_check_offset = __ offset();
      __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
    } else {
      assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
      __ mov(Rtemp, src_hi);
      null_check_offset = __ offset();
      __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
    }
  } else {
    // Load
    const LIR_Address* addr = src->as_address_ptr();
    const Register dest_lo = dest->as_register_lo();
    const Register dest_hi = dest->as_register_hi();
    assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

    null_check_offset = __ offset();
    if (dest_lo < dest_hi) {
      __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
    } else {
      assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
      __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
      __ mov(dest_hi, Rtemp);
    }
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }
}
2668
2669
2670 void LIR_Assembler::membar() {
2671 __ membar(MacroAssembler::StoreLoad, Rtemp);
2672 }
2673
2674 void LIR_Assembler::membar_acquire() {
2675 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
2676 }
2677
2678 void LIR_Assembler::membar_release() {
2679 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2680 }
2681
2682 void LIR_Assembler::membar_loadload() {
2683 __ membar(MacroAssembler::LoadLoad, Rtemp);
2684 }
2685
2686 void LIR_Assembler::membar_storestore() {
2687 __ membar(MacroAssembler::StoreStore, Rtemp);
2688 }
2689
2690 void LIR_Assembler::membar_loadstore() {
2691 __ membar(MacroAssembler::LoadStore, Rtemp);
2692 }
2693
2694 void LIR_Assembler::membar_storeload() {
2695 __ membar(MacroAssembler::StoreLoad, Rtemp);
2696 }
2697
// Spin-wait hint for Thread.onSpinWait(); not implemented in the 32-bit
// ARM port, so reaching this is a fatal error.
void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}
2701
// Load the current JavaThread into 'result_reg'. The ARM port does not
// generate this LIR operation, so this path must never be taken.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  // Not used on ARM
  Unimplemented();
}
2706
2707 void LIR_Assembler::peephole(LIR_List* lir) {
2708 LIR_OpList* inst = lir->instructions_list();
2709 const int inst_length = inst->length();
2710 for (int i = 0; i < inst_length; i++) {
2711 LIR_Op* op = inst->at(i);
2712 switch (op->code()) {
2713 case lir_cmp: {
2714 // Replace:
2715 // cmp rX, y
2716 // cmove [EQ] y, z, rX
2717 // with
2718 // cmp rX, y
2719 // cmove [EQ] illegalOpr, z, rX
2720 //
2721 // or
2722 // cmp rX, y
2723 // cmove [NE] z, y, rX
2724 // with
2725 // cmp rX, y
2726 // cmove [NE] z, illegalOpr, rX
2727 //
2728 // moves from illegalOpr should be removed when converting LIR to native assembly
2729
2730 LIR_Op2* cmp = op->as_Op2();
2731 assert(cmp != nullptr, "cmp LIR instruction is not an op2");
2732
2733 if (i + 1 < inst_length) {
2734 LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
2735 if (cmove != nullptr && cmove->code() == lir_cmove) {
2736 LIR_Opr cmove_res = cmove->result_opr();
2737 bool res_is_op1 = cmove_res == cmp->in_opr1();
2738 bool res_is_op2 = cmove_res == cmp->in_opr2();
2739 LIR_Opr cmp_res, cmp_arg;
2740 if (res_is_op1) {
2741 cmp_res = cmp->in_opr1();
2742 cmp_arg = cmp->in_opr2();
2743 } else if (res_is_op2) {
2744 cmp_res = cmp->in_opr2();
2745 cmp_arg = cmp->in_opr1();
2746 } else {
2747 cmp_res = LIR_OprFact::illegalOpr;
2748 cmp_arg = LIR_OprFact::illegalOpr;
2749 }
2750
2751 if (cmp_res != LIR_OprFact::illegalOpr) {
2752 LIR_Condition cond = cmove->condition();
2753 if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
2754 cmove->set_in_opr1(LIR_OprFact::illegalOpr);
2755 } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
2756 cmove->set_in_opr2(LIR_OprFact::illegalOpr);
2757 }
2758 }
2759 }
2760 }
2761 break;
2762 }
2763
2764 default:
2765 break;
2766 }
2767 }
2768 }
2769
2770 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2771 assert(src->is_address(), "sanity");
2772 Address addr = as_Address(src->as_address_ptr());
2773
2774 if (code == lir_xchg) {
2775 } else {
2776 assert (!data->is_oop(), "xadd for oops");
2777 }
2778
2779 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2780
2781 Label retry;
2782 __ bind(retry);
2783
2784 if (data->type() == T_INT || data->is_oop()) {
2785 Register dst = dest->as_register();
2786 Register new_val = noreg;
2787 __ ldrex(dst, addr);
2788 if (code == lir_xadd) {
2789 Register tmp_reg = tmp->as_register();
2790 if (data->is_constant()) {
2791 assert_different_registers(dst, tmp_reg);
2792 __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
2793 } else {
2794 assert_different_registers(dst, tmp_reg, data->as_register());
2795 __ add_32(tmp_reg, dst, data->as_register());
2796 }
2797 new_val = tmp_reg;
2798 } else {
2799 if (UseCompressedOops && data->is_oop()) {
2800 new_val = tmp->as_pointer_register();
2801 } else {
2802 new_val = data->as_register();
2803 }
2804 assert_different_registers(dst, new_val);
2805 }
2806 __ strex(Rtemp, new_val, addr);
2807
2808 } else if (data->type() == T_LONG) {
2809 Register dst_lo = dest->as_register_lo();
2810 Register new_val_lo = noreg;
2811 Register dst_hi = dest->as_register_hi();
2812
2813 assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
2814 assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
2815
2816 __ bind(retry);
2817 __ ldrexd(dst_lo, addr);
2818 if (code == lir_xadd) {
2819 Register tmp_lo = tmp->as_register_lo();
2820 Register tmp_hi = tmp->as_register_hi();
2821
2822 assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
2823 assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
2824
2825 if (data->is_constant()) {
2826 jlong c = data->as_constant_ptr()->as_jlong();
2827 assert((jlong)((jint)c) == c, "overflow");
2828 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
2829 __ adds(tmp_lo, dst_lo, (jint)c);
2830 __ adc(tmp_hi, dst_hi, 0);
2831 } else {
2832 Register new_val_lo = data->as_register_lo();
2833 Register new_val_hi = data->as_register_hi();
2834 __ adds(tmp_lo, dst_lo, new_val_lo);
2835 __ adc(tmp_hi, dst_hi, new_val_hi);
2836 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
2837 }
2838 new_val_lo = tmp_lo;
2839 } else {
2840 new_val_lo = data->as_register_lo();
2841 Register new_val_hi = data->as_register_hi();
2842
2843 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
2844 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
2845 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
2846 }
2847 __ strexd(Rtemp, new_val_lo, addr);
2848 } else {
2849 ShouldNotReachHere();
2850 }
2851
2852 __ cbnz_32(Rtemp, retry);
2853 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
2854
2855 }
2856
2857 #undef __