/*
 * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();


  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ?
      Address(base, addr->index()->as_register(), lsl, scale) :
      Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(), 0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}


int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr), "cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != nullptr) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
    frame_map()->address_for_slot(dest->single_stack_ix()) :
    frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
    frame_map()->address_for_slot(src->single_stack_ix()) :
    frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
  assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert (op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != nullptr, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != nullptr, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ?
          &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ?
          &profile_cast_success : &done;


      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ?
          &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // if (*addr == cmpval) {
  //   *addr = newval;
  //   dest = 1;
  // } else {
  //   dest = 0;
  // }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_Opr::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) { // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == nullptr, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(), "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(), "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
orr_32(res, lreg, rreg); break; 1676 case lir_logic_xor: __ eor_32(res, lreg, rreg); break; 1677 default: ShouldNotReachHere(); 1678 } 1679 } else { 1680 assert(right->is_constant(), "must be"); 1681 const uint c = (uint)right->as_constant_ptr()->as_jint(); 1682 if (!Assembler::is_arith_imm_in_range(c)) { 1683 BAILOUT("illegal arithmetic operand"); 1684 } 1685 switch (code) { 1686 case lir_logic_and: __ and_32(res, lreg, c); break; 1687 case lir_logic_or: __ orr_32(res, lreg, c); break; 1688 case lir_logic_xor: __ eor_32(res, lreg, c); break; 1689 default: ShouldNotReachHere(); 1690 } 1691 } 1692 } else { 1693 assert(dest->is_double_cpu(), "should be"); 1694 Register res_lo = dest->as_register_lo(); 1695 1696 assert (dest->type() == T_LONG, "unexpected result type"); 1697 assert (left->type() == T_LONG, "unexpected left type"); 1698 assert (right->type() == T_LONG, "unexpected right type"); 1699 1700 const Register res_hi = dest->as_register_hi(); 1701 const Register lreg_lo = left->as_register_lo(); 1702 const Register lreg_hi = left->as_register_hi(); 1703 1704 if (right->is_register()) { 1705 const Register rreg_lo = right->as_register_lo(); 1706 const Register rreg_hi = right->as_register_hi(); 1707 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1708 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input 1709 } 1710 switch (code) { 1711 case lir_logic_and: 1712 __ andr(res_lo, lreg_lo, rreg_lo); 1713 __ andr(res_hi, lreg_hi, rreg_hi); 1714 break; 1715 case lir_logic_or: 1716 __ orr(res_lo, lreg_lo, rreg_lo); 1717 __ orr(res_hi, lreg_hi, rreg_hi); 1718 break; 1719 case lir_logic_xor: 1720 __ eor(res_lo, lreg_lo, rreg_lo); 1721 __ eor(res_hi, lreg_hi, rreg_hi); 1722 break; 1723 default: 1724 ShouldNotReachHere(); 1725 } 1726 move_regs(res_lo, dest->as_register_lo()); 1727 } else { 1728 assert(right->is_constant(), "must be"); 1729 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong(); 1730 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32); 1731 // Case for logic_or from do_ClassIDIntrinsic() 1732 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) { 1733 switch (code) { 1734 case lir_logic_and: 1735 __ andr(res_lo, lreg_lo, c_lo); 1736 __ mov(res_hi, 0); 1737 break; 1738 case lir_logic_or: 1739 __ orr(res_lo, lreg_lo, c_lo); 1740 break; 1741 case lir_logic_xor: 1742 __ eor(res_lo, lreg_lo, c_lo); 1743 break; 1744 default: 1745 ShouldNotReachHere(); 1746 } 1747 } else if (code == lir_logic_and && 1748 c_hi == -1 && 1749 (AsmOperand::is_rotated_imm(c_lo) || 1750 AsmOperand::is_rotated_imm(~c_lo))) { 1751 // Another case which handles logic_and from do_ClassIDIntrinsic() 1752 if (AsmOperand::is_rotated_imm(c_lo)) { 1753 __ andr(res_lo, lreg_lo, c_lo); 1754 } else { 1755 __ bic(res_lo, lreg_lo, ~c_lo); 1756 } 1757 if (res_hi != lreg_hi) { 1758 __ mov(res_hi, lreg_hi); 1759 } 1760 } else { 1761 BAILOUT("64 bit constant cannot be inlined"); 1762 } 1763 } 1764 } 1765 } 1766 1767 1768 1769 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1770 if (opr1->is_single_cpu()) { 1771 if (opr2->is_constant()) { 1772 switch (opr2->as_constant_ptr()->type()) { 1773 case T_INT: { 1774 const jint c = opr2->as_constant_ptr()->as_jint(); 1775 if (Assembler::is_arith_imm_in_range(c)) { 1776 __ cmp_32(opr1->as_register(), c); 1777 } else if (Assembler::is_arith_imm_in_range(-c)) { 1778 __ cmn_32(opr1->as_register(), -c); 1779 } else { 1780 // This can happen when compiling lookupswitch 1781 __ mov_slow(Rtemp, c); 
            __ cmp_32(opr1->as_register(), Rtemp);
          }
          break;
        }
        case T_OBJECT:
          assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        case T_METADATA:
          assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
          assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        default:
          ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
        __ cmpoop(opr1->as_register(), opr2->as_register());
      } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        __ subs(Rtemp, xlo, ylo);
        __ sbcs(Rtemp, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    __ fmstat();
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else { // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}


void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer, so
  // mov_metadata can't be used.
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // Without movw/movt the value must be loaded PC-relative, but there is no
    // relocation, so there is no metadata table to load it from.
    // Use a b instruction rather than a bl, inline the constant after the
    // branch, use a PC-relative ldr to load the constant, and arrange for
    // the call to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
    if (!use_movw) {
      __ bind_literal(value);
    }
    __ bind(call_return);
  }
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    BAILOUT("static call stub overflow");
  }

  DEBUG_ONLY(int offset = code_offset();)

  InlinedMetadata metadata_literal(nullptr);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // If not a single instruction, NativeMovConstReg::next_instruction_address()
  // must jump over the whole following ldr_literal.
  // (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
  address ldr_site = __ pc();
#endif
  __ ldr_literal(Rmethod, metadata_literal);
  assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  if (near_range) {
    address branch_site = __ pc();
    __ b(branch_site); // b to self maps to special NativeJump -1 destination
  } else {
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(code_offset() - offset <= call_stub_size(), "overflow");
  __ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
1979 Runtime1::handle_exception_id : 1980 Runtime1::handle_exception_nofpu_id; 1981 Label return_address; 1982 __ adr(Rexception_pc, return_address); 1983 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); 1984 __ bind(return_address); 1985 add_call_info_here(info); // for exception handler 1986 } 1987 1988 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1989 assert(exceptionOop->as_register() == Rexception_obj, "must match"); 1990 __ b(_unwind_handler_entry); 1991 } 1992 1993 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 1994 AsmShift shift = lsl; 1995 switch (code) { 1996 case lir_shl: shift = lsl; break; 1997 case lir_shr: shift = asr; break; 1998 case lir_ushr: shift = lsr; break; 1999 default: ShouldNotReachHere(); 2000 } 2001 2002 if (dest->is_single_cpu()) { 2003 __ andr(Rtemp, count->as_register(), 31); 2004 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp)); 2005 } else if (dest->is_double_cpu()) { 2006 Register dest_lo = dest->as_register_lo(); 2007 Register dest_hi = dest->as_register_hi(); 2008 Register src_lo = left->as_register_lo(); 2009 Register src_hi = left->as_register_hi(); 2010 Register Rcount = count->as_register(); 2011 // Resolve possible register conflicts 2012 if (shift == lsl && dest_hi == src_lo) { 2013 dest_hi = Rtemp; 2014 } else if (shift != lsl && dest_lo == src_hi) { 2015 dest_lo = Rtemp; 2016 } else if (dest_lo == src_lo && dest_hi == src_hi) { 2017 dest_lo = Rtemp; 2018 } else if (dest_lo == Rcount || dest_hi == Rcount) { 2019 Rcount = Rtemp; 2020 } 2021 __ andr(Rcount, count->as_register(), 63); 2022 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount); 2023 move_regs(dest_lo, dest->as_register_lo()); 2024 move_regs(dest_hi, dest->as_register_hi()); 2025 } else { 2026 ShouldNotReachHere(); 2027 } 2028 } 2029 2030 2031 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2032 AsmShift shift = lsl; 2033 switch (code) { 2034 case lir_shl: shift = lsl; break; 2035 case lir_shr: shift = asr; break; 2036 case lir_ushr: shift = lsr; break; 2037 default: ShouldNotReachHere(); 2038 } 2039 2040 if (dest->is_single_cpu()) { 2041 count &= 31; 2042 if (count != 0) { 2043 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count)); 2044 } else { 2045 move_regs(left->as_register(), dest->as_register()); 2046 } 2047 } else if (dest->is_double_cpu()) { 2048 count &= 63; 2049 if (count != 0) { 2050 Register dest_lo = dest->as_register_lo(); 2051 Register dest_hi = dest->as_register_hi(); 2052 Register src_lo = left->as_register_lo(); 2053 Register src_hi = left->as_register_hi(); 2054 // Resolve possible register conflicts 2055 if (shift == lsl && dest_hi == src_lo) { 2056 dest_hi = Rtemp; 2057 } else if (shift != lsl && dest_lo == src_hi) { 2058 dest_lo = Rtemp; 2059 } 2060 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count); 2061 move_regs(dest_lo, dest->as_register_lo()); 2062 move_regs(dest_hi, dest->as_register_hi()); 2063 } else { 2064 __ long_move(dest->as_register_lo(), dest->as_register_hi(), 2065 left->as_register_lo(), left->as_register_hi()); 2066 } 2067 } else { 2068 ShouldNotReachHere(); 2069 } 2070 } 2071 2072 2073 // Saves 4 given registers in reserved argument area. 
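// A sketch of the layout produced by save_in_reserved_area below (and read back
// by restore_from_reserved_area), assuming the registers are passed in ascending
// numerical order as they are at the call sites in this file (R0..R3):
//
//   SP + 0*wordSize : r1
//   SP + 1*wordSize : r2
//   SP + 2*wordSize : r3
//   SP + 3*wordSize : r4
//
// stmia/ldmia transfer the register set in ascending register order starting at
// SP, so the callers get the original R0..R3 back unchanged.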
2074 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2075 verify_reserved_argument_area_size(4); 2076 __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4)); 2077 } 2078 2079 // Restores 4 given registers from reserved argument area. 2080 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2081 __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback); 2082 } 2083 2084 2085 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2086 ciArrayKlass* default_type = op->expected_type(); 2087 Register src = op->src()->as_register(); 2088 Register src_pos = op->src_pos()->as_register(); 2089 Register dst = op->dst()->as_register(); 2090 Register dst_pos = op->dst_pos()->as_register(); 2091 Register length = op->length()->as_register(); 2092 Register tmp = op->tmp()->as_register(); 2093 Register tmp2 = Rtemp; 2094 2095 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption"); 2096 2097 CodeStub* stub = op->stub(); 2098 2099 int flags = op->flags(); 2100 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 2101 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2102 2103 // If we don't know anything or it's an object array, just go through the generic arraycopy 2104 if (default_type == nullptr) { 2105 2106 // save arguments, because they will be killed by a runtime call 2107 save_in_reserved_area(R0, R1, R2, R3); 2108 2109 // pass length argument on SP[0] 2110 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment 2111 2112 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2113 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 2114 #ifndef PRODUCT 2115 if (PrintC1Statistics) { 2116 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); 2117 } 2118 #endif // !PRODUCT 2119 // the stub is in the code cache so close enough 2120 __ call(copyfunc_addr, relocInfo::runtime_call_type); 2121 2122 __ add(SP, SP, 2*wordSize); 2123 2124 __ cbz_32(R0, *stub->continuation()); 2125 2126 __ mvn_32(tmp, R0); 2127 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only 2128 __ sub_32(length, length, tmp); 2129 __ add_32(src_pos, src_pos, tmp); 2130 __ add_32(dst_pos, dst_pos, tmp); 2131 2132 __ b(*stub->entry()); 2133 2134 __ bind(*stub->continuation()); 2135 return; 2136 } 2137 2138 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), 2139 "must be true at this point"); 2140 int elem_size = type2aelembytes(basic_type); 2141 int shift = exact_log2(elem_size); 2142 2143 // Check for null 2144 if (flags & LIR_OpArrayCopy::src_null_check) { 2145 if (flags & LIR_OpArrayCopy::dst_null_check) { 2146 __ cmp(src, 0); 2147 __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed 2148 __ b(*stub->entry(), eq); 2149 } else { 2150 __ cbz(src, *stub->entry()); 2151 } 2152 } else if (flags & LIR_OpArrayCopy::dst_null_check) { 2153 __ cbz(dst, *stub->entry()); 2154 } 2155 2156 // If the compiler was not able to prove that exact type of the source or the destination 2157 // of the arraycopy is an array type, check at runtime if the source or the destination is 2158 // an instance type. 
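  // A sketch of the test emitted below for each operand that is not statically
  // known to be an object array: array klasses have a negative layout helper,
  // while instance klasses have one >= Klass::_lh_neutral_value, so
  //
  //   load_klass tmp, obj
  //   ldr_u32    tmp2, [tmp + Klass::layout_helper_offset()]
  //   mov_slow   tmp, Klass::_lh_neutral_value
  //   cmp_32     tmp2, tmp
  //   b(*stub->entry(), ge)   // layout helper not negative -> not an array -> slow path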
2159 if (flags & LIR_OpArrayCopy::type_check) { 2160 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2161 __ load_klass(tmp, dst); 2162 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2163 __ mov_slow(tmp, Klass::_lh_neutral_value); 2164 __ cmp_32(tmp2, tmp); 2165 __ b(*stub->entry(), ge); 2166 } 2167 2168 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2169 __ load_klass(tmp, src); 2170 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2171 __ mov_slow(tmp, Klass::_lh_neutral_value); 2172 __ cmp_32(tmp2, tmp); 2173 __ b(*stub->entry(), ge); 2174 } 2175 } 2176 2177 // Check if negative 2178 const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check | 2179 LIR_OpArrayCopy::dst_pos_positive_check | 2180 LIR_OpArrayCopy::length_positive_check; 2181 switch (flags & all_positive_checks) { 2182 case LIR_OpArrayCopy::src_pos_positive_check: 2183 __ branch_if_negative_32(src_pos, *stub->entry()); 2184 break; 2185 case LIR_OpArrayCopy::dst_pos_positive_check: 2186 __ branch_if_negative_32(dst_pos, *stub->entry()); 2187 break; 2188 case LIR_OpArrayCopy::length_positive_check: 2189 __ branch_if_negative_32(length, *stub->entry()); 2190 break; 2191 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check: 2192 __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry()); 2193 break; 2194 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2195 __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry()); 2196 break; 2197 case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2198 __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry()); 2199 break; 2200 case all_positive_checks: 2201 __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry()); 2202 break; 2203 default: 2204 assert((flags & all_positive_checks) == 0, "the last option"); 2205 } 2206 2207 // Range checks 2208 if (flags & LIR_OpArrayCopy::src_range_check) { 2209 __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes())); 2210 __ add_32(tmp, src_pos, length); 2211 __ cmp_32(tmp, tmp2); 2212 __ b(*stub->entry(), hi); 2213 } 2214 if (flags & LIR_OpArrayCopy::dst_range_check) { 2215 __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2216 __ add_32(tmp, dst_pos, length); 2217 __ cmp_32(tmp, tmp2); 2218 __ b(*stub->entry(), hi); 2219 } 2220 2221 // Check if src and dst are of the same type 2222 if (flags & LIR_OpArrayCopy::type_check) { 2223 // We don't know the array types are compatible 2224 if (basic_type != T_OBJECT) { 2225 // Simple test for basic type arrays 2226 if (UseCompressedClassPointers) { 2227 // We don't need decode because we just need to compare 2228 __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes())); 2229 __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); 2230 __ cmp_32(tmp, tmp2); 2231 } else { 2232 __ load_klass(tmp, src); 2233 __ load_klass(tmp2, dst); 2234 __ cmp(tmp, tmp2); 2235 } 2236 __ b(*stub->entry(), ne); 2237 } else { 2238 // For object arrays, if src is a sub class of dst then we can 2239 // safely do the copy. 
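      // Outline of the code emitted below (a sketch, not additional instructions):
      //  1. check_klass_subtype_fast_path: if the src klass is a subtype of the dst
      //     klass the whole copy is safe and control joins at 'cont'.
      //  2. Otherwise Runtime1::slow_subtype_check_id gives the definitive answer;
      //     R0/R1 are preserved around that call in R6 and altFP_7_11.
      //  3. Only when the subtype check really fails does the code fall back to
      //     StubRoutines::checkcast_arraycopy(), which copies with a per-element type
      //     check and returns 0 on success or ~(elements copied) on failure, so the
      //     slow path below can advance src_pos/dst_pos and shrink length before
      //     taking the generic stub.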
      Label cont, slow;

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      __ load_klass(tmp, src);
      __ load_klass(tmp2, dst);

      // We are at a call so all live registers are saved before we
      // get here
      assert_different_registers(tmp, tmp2, R6, altFP_7_11);

      __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);

      __ mov(R6, R0);
      __ mov(altFP_7_11, R1);
      __ mov(R0, tmp);
      __ mov(R1, tmp2);
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
      __ cmp_32(R0, 0);
      __ mov(R0, R6);
      __ mov(R1, altFP_7_11);

      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        __ b(cont, ne);

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of the two is statically known to be an object array; check at
          // runtime that the other one is an object array too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ ldr_u32(tmp2, Address(tmp, lh_offset));

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ mov_slow(tmp, objArray_lh);
          __ cmp_32(tmp, tmp2);
          __ b(*stub->entry(), ne);
        }

        save_in_reserved_area(R0, R1, R2, R3);

        Register src_ptr = R0;
        Register dst_ptr = R1;
        Register len = R2;
        Register chk_off = R3;
        Register super_k = tmp;

        __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

        __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
        __ load_klass(tmp, dst);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        int sco_offset = in_bytes(Klass::super_check_offset_offset());

        __ ldr(super_k, Address(tmp, ek_offset));

        __ mov(len, length);
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
        __ push(super_k);

        __ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz_32(R0, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
          __ bind(failed);
        }
#endif // PRODUCT

        __ add(SP, SP, wordSize); // Drop super_k argument

        __ cbz_32(R0, *stub->continuation());
        __ mvn_32(tmp, R0);

        // load saved arguments in slow case only
        restore_from_reserved_area(R0, R1, R2, R3);

        __ sub_32(length, length, tmp);
        __ add_32(src_pos, src_pos, tmp);
        __ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
        }
#endif

        __ b(*stub->entry());

        __ bind(cont);
      } else {
        __ b(*stub->entry(), eq);
        __ bind(cont);
      }
    }
  }

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter =
Runtime1::arraycopy_count_address(basic_type); 2356 __ inc_counter(counter, tmp, tmp2); 2357 } 2358 #endif // !PRODUCT 2359 2360 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2361 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2362 const char *name; 2363 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2364 2365 Register src_ptr = R0; 2366 Register dst_ptr = R1; 2367 Register len = R2; 2368 2369 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2370 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift); 2371 2372 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2373 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift); 2374 2375 __ mov(len, length); 2376 2377 __ call(entry, relocInfo::runtime_call_type); 2378 2379 __ bind(*stub->continuation()); 2380 } 2381 2382 #ifdef ASSERT 2383 // emit run-time assertion 2384 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2385 assert(op->code() == lir_assert, "must be"); 2386 2387 if (op->in_opr1()->is_valid()) { 2388 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2389 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2390 } else { 2391 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2392 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2393 } 2394 2395 Label ok; 2396 if (op->condition() != lir_cond_always) { 2397 AsmCondition acond = al; 2398 switch (op->condition()) { 2399 case lir_cond_equal: acond = eq; break; 2400 case lir_cond_notEqual: acond = ne; break; 2401 case lir_cond_less: acond = lt; break; 2402 case lir_cond_lessEqual: acond = le; break; 2403 case lir_cond_greaterEqual: acond = ge; break; 2404 case lir_cond_greater: acond = gt; break; 2405 case lir_cond_aboveEqual: acond = hs; break; 2406 case lir_cond_belowEqual: acond = ls; break; 2407 default: ShouldNotReachHere(); 2408 } 2409 __ b(ok, acond); 2410 } 2411 if (op->halt()) { 2412 const char* str = __ code_string(op->msg()); 2413 __ stop(str); 2414 } else { 2415 breakpoint(); 2416 } 2417 __ bind(ok); 2418 } 2419 #endif // ASSERT 2420 2421 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2422 fatal("CRC32 intrinsic is not implemented on this platform"); 2423 } 2424 2425 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2426 Register obj = op->obj_opr()->as_pointer_register(); 2427 Register hdr = op->hdr_opr()->as_pointer_register(); 2428 Register lock = op->lock_opr()->as_pointer_register(); 2429 2430 if (LockingMode == LM_MONITOR) { 2431 if (op->info() != nullptr) { 2432 add_debug_info_for_null_check_here(op->info()); 2433 __ null_check(obj); 2434 } 2435 __ b(*op->stub()->entry()); 2436 } else if (op->code() == lir_lock) { 2437 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2438 int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2439 if (op->info() != nullptr) { 2440 add_debug_info_for_null_check(null_check_offset, op->info()); 2441 } 2442 } else if (op->code() == lir_unlock) { 2443 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2444 } else { 2445 ShouldNotReachHere(); 2446 } 2447 __ bind(*op->stub()->continuation()); 2448 } 2449 2450 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2451 Register obj = op->obj()->as_pointer_register(); 2452 Register result = op->result_opr()->as_pointer_register(); 2453 2454 CodeEmitInfo* info = op->info(); 2455 if (info != nullptr) { 
2456 add_debug_info_for_null_check_here(info); 2457 } 2458 2459 if (UseCompressedClassPointers) { // On 32 bit arm?? 2460 __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2461 } else { 2462 __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2463 } 2464 } 2465 2466 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2467 ciMethod* method = op->profiled_method(); 2468 int bci = op->profiled_bci(); 2469 ciMethod* callee = op->profiled_callee(); 2470 2471 // Update counter for all call types 2472 ciMethodData* md = method->method_data_or_null(); 2473 assert(md != nullptr, "Sanity"); 2474 ciProfileData* data = md->bci_to_data(bci); 2475 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2476 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2477 Register mdo = op->mdo()->as_register(); 2478 assert(op->tmp1()->is_register(), "tmp1 must be allocated"); 2479 Register tmp1 = op->tmp1()->as_pointer_register(); 2480 assert_different_registers(mdo, tmp1); 2481 __ mov_metadata(mdo, md->constant_encoding()); 2482 int mdo_offset_bias = 0; 2483 int max_offset = 4096; 2484 if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) { 2485 // The offset is large so bias the mdo by the base of the slot so 2486 // that the ldr can use an immediate offset to reference the slots of the data 2487 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2488 __ mov_slow(tmp1, mdo_offset_bias); 2489 __ add(mdo, mdo, tmp1); 2490 } 2491 2492 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2493 // Perform additional virtual call profiling for invokevirtual and 2494 // invokeinterface bytecodes 2495 if (op->should_profile_receiver_type()) { 2496 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2497 Register recv = op->recv()->as_register(); 2498 assert_different_registers(mdo, tmp1, recv); 2499 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2500 ciKlass* known_klass = op->known_holder(); 2501 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2502 // We know the type that will be seen at this call site; we can 2503 // statically update the MethodData* rather than needing to do 2504 // dynamic tests on the receiver type 2505 2506 // NOTE: we should probably put a lock around this search to 2507 // avoid collisions by concurrent compilations 2508 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2509 uint i; 2510 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2511 ciKlass* receiver = vc_data->receiver(i); 2512 if (known_klass->equals(receiver)) { 2513 Address data_addr(mdo, md->byte_offset_of_slot(data, 2514 VirtualCallData::receiver_count_offset(i)) - 2515 mdo_offset_bias); 2516 __ ldr(tmp1, data_addr); 2517 __ add(tmp1, tmp1, DataLayout::counter_increment); 2518 __ str(tmp1, data_addr); 2519 return; 2520 } 2521 } 2522 2523 // Receiver type not found in profile data; select an empty slot 2524 2525 // Note that this is less efficient than it should be because it 2526 // always does a write to the receiver part of the 2527 // VirtualCallData rather than just the first time 2528 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2529 ciKlass* receiver = vc_data->receiver(i); 2530 if (receiver == nullptr) { 2531 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 2532 mdo_offset_bias); 2533 __ mov_metadata(tmp1, 
known_klass->constant_encoding()); 2534 __ str(tmp1, recv_addr); 2535 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 2536 mdo_offset_bias); 2537 __ ldr(tmp1, data_addr); 2538 __ add(tmp1, tmp1, DataLayout::counter_increment); 2539 __ str(tmp1, data_addr); 2540 return; 2541 } 2542 } 2543 } else { 2544 __ load_klass(recv, recv); 2545 Label update_done; 2546 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2547 // Receiver did not match any saved receiver and there is no empty row for it. 2548 // Increment total counter to indicate polymorphic case. 2549 __ ldr(tmp1, counter_addr); 2550 __ add(tmp1, tmp1, DataLayout::counter_increment); 2551 __ str(tmp1, counter_addr); 2552 2553 __ bind(update_done); 2554 } 2555 } else { 2556 // Static call 2557 __ ldr(tmp1, counter_addr); 2558 __ add(tmp1, tmp1, DataLayout::counter_increment); 2559 __ str(tmp1, counter_addr); 2560 } 2561 } 2562 2563 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2564 fatal("Type profiling not implemented on this platform"); 2565 } 2566 2567 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 2568 Unimplemented(); 2569 } 2570 2571 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2572 Unimplemented(); 2573 } 2574 2575 2576 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2577 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2578 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp()); 2579 } 2580 2581 2582 void LIR_Assembler::align_backward_branch_target() { 2583 // Some ARM processors do better with 8-byte branch target alignment 2584 __ align(8); 2585 } 2586 2587 2588 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2589 // tmp must be unused 2590 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2591 2592 if (left->is_single_cpu()) { 2593 assert (dest->type() == T_INT, "unexpected result type"); 2594 assert (left->type() == T_INT, "unexpected left type"); 2595 __ neg_32(dest->as_register(), left->as_register()); 2596 } else if (left->is_double_cpu()) { 2597 Register dest_lo = dest->as_register_lo(); 2598 Register dest_hi = dest->as_register_hi(); 2599 Register src_lo = left->as_register_lo(); 2600 Register src_hi = left->as_register_hi(); 2601 if (dest_lo == src_hi) { 2602 dest_lo = Rtemp; 2603 } 2604 __ rsbs(dest_lo, src_lo, 0); 2605 __ rsc(dest_hi, src_hi, 0); 2606 move_regs(dest_lo, dest->as_register_lo()); 2607 } else if (left->is_single_fpu()) { 2608 __ neg_float(dest->as_float_reg(), left->as_float_reg()); 2609 } else if (left->is_double_fpu()) { 2610 __ neg_double(dest->as_double_reg(), left->as_double_reg()); 2611 } else { 2612 ShouldNotReachHere(); 2613 } 2614 } 2615 2616 2617 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2618 assert(patch_code == lir_patch_none, "Patch code not supported"); 2619 LIR_Address* addr = addr_opr->as_address_ptr(); 2620 if (addr->index()->is_illegal()) { 2621 jint c = addr->disp(); 2622 if (!Assembler::is_arith_imm_in_range(c)) { 2623 BAILOUT("illegal arithmetic operand"); 2624 } 2625 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c); 2626 } else { 2627 assert(addr->disp() == 0, "cannot handle otherwise"); 2628 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), 2629 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale())); 2630 } 2631 } 2632 2633 2634 void 
LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2635 assert(!tmp->is_valid(), "don't need temporary"); 2636 __ call(dest); 2637 if (info != nullptr) { 2638 add_call_info_here(info); 2639 } 2640 } 2641 2642 2643 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2644 assert((src->is_double_cpu() && dest->is_address()) || 2645 (src->is_address() && dest->is_double_cpu()), 2646 "Simple move_op is called for all other cases"); 2647 2648 int null_check_offset; 2649 if (dest->is_address()) { 2650 // Store 2651 const LIR_Address* addr = dest->as_address_ptr(); 2652 const Register src_lo = src->as_register_lo(); 2653 const Register src_hi = src->as_register_hi(); 2654 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2655 2656 if (src_lo < src_hi) { 2657 null_check_offset = __ offset(); 2658 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi)); 2659 } else { 2660 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2661 __ mov(Rtemp, src_hi); 2662 null_check_offset = __ offset(); 2663 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp)); 2664 } 2665 } else { 2666 // Load 2667 const LIR_Address* addr = src->as_address_ptr(); 2668 const Register dest_lo = dest->as_register_lo(); 2669 const Register dest_hi = dest->as_register_hi(); 2670 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2671 2672 null_check_offset = __ offset(); 2673 if (dest_lo < dest_hi) { 2674 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi)); 2675 } else { 2676 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2677 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp)); 2678 __ mov(dest_hi, Rtemp); 2679 } 2680 } 2681 2682 if (info != nullptr) { 2683 add_debug_info_for_null_check(null_check_offset, info); 2684 } 2685 } 2686 2687 2688 void LIR_Assembler::membar() { 2689 __ membar(MacroAssembler::StoreLoad, Rtemp); 2690 } 2691 2692 void LIR_Assembler::membar_acquire() { 2693 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 2694 } 2695 2696 void LIR_Assembler::membar_release() { 2697 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 2698 } 2699 2700 void LIR_Assembler::membar_loadload() { 2701 __ membar(MacroAssembler::LoadLoad, Rtemp); 2702 } 2703 2704 void LIR_Assembler::membar_storestore() { 2705 __ membar(MacroAssembler::StoreStore, Rtemp); 2706 } 2707 2708 void LIR_Assembler::membar_loadstore() { 2709 __ membar(MacroAssembler::LoadStore, Rtemp); 2710 } 2711 2712 void LIR_Assembler::membar_storeload() { 2713 __ membar(MacroAssembler::StoreLoad, Rtemp); 2714 } 2715 2716 void LIR_Assembler::on_spin_wait() { 2717 Unimplemented(); 2718 } 2719 2720 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2721 // Not used on ARM 2722 Unimplemented(); 2723 } 2724 2725 void LIR_Assembler::peephole(LIR_List* lir) { 2726 LIR_OpList* inst = lir->instructions_list(); 2727 const int inst_length = inst->length(); 2728 for (int i = 0; i < inst_length; i++) { 2729 LIR_Op* op = inst->at(i); 2730 switch (op->code()) { 2731 case lir_cmp: { 2732 // Replace: 2733 // cmp rX, y 2734 // cmove [EQ] y, z, rX 2735 // with 2736 // cmp rX, y 2737 // cmove [EQ] illegalOpr, z, rX 2738 // 2739 // or 
        // cmp rX, y
        // cmove [NE] z, y, rX
        // with
        // cmp rX, y
        // cmove [NE] z, illegalOpr, rX
        //
        // moves from illegalOpr should be removed when converting LIR to native assembly

        LIR_Op2* cmp = op->as_Op2();
        assert(cmp != nullptr, "cmp LIR instruction is not an op2");

        if (i + 1 < inst_length) {
          LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
          if (cmove != nullptr && cmove->code() == lir_cmove) {
            LIR_Opr cmove_res = cmove->result_opr();
            bool res_is_op1 = cmove_res == cmp->in_opr1();
            bool res_is_op2 = cmove_res == cmp->in_opr2();
            LIR_Opr cmp_res, cmp_arg;
            if (res_is_op1) {
              cmp_res = cmp->in_opr1();
              cmp_arg = cmp->in_opr2();
            } else if (res_is_op2) {
              cmp_res = cmp->in_opr2();
              cmp_arg = cmp->in_opr1();
            } else {
              cmp_res = LIR_OprFact::illegalOpr;
              cmp_arg = LIR_OprFact::illegalOpr;
            }

            if (cmp_res != LIR_OprFact::illegalOpr) {
              LIR_Condition cond = cmove->condition();
              if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
                cmove->set_in_opr1(LIR_OprFact::illegalOpr);
              } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
                cmove->set_in_opr2(LIR_OprFact::illegalOpr);
              }
            }
          }
        }
        break;
      }

      default:
        break;
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(src->is_address(), "sanity");
  Address addr = as_Address(src->as_address_ptr());

  if (code == lir_xchg) {
  } else {
    assert (!data->is_oop(), "xadd for oops");
  }

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

  Label retry;

  // ldrex/strex retry loop: both the 32-bit and the 64-bit paths below branch
  // back here (via the cbnz at the end) when the store-exclusive fails.
  __ bind(retry);

  if (data->type() == T_INT || data->is_oop()) {
    Register dst = dest->as_register();
    Register new_val = noreg;
    __ ldrex(dst, addr);
    if (code == lir_xadd) {
      Register tmp_reg = tmp->as_register();
      if (data->is_constant()) {
        assert_different_registers(dst, tmp_reg);
        __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
      } else {
        assert_different_registers(dst, tmp_reg, data->as_register());
        __ add_32(tmp_reg, dst, data->as_register());
      }
      new_val = tmp_reg;
    } else {
      if (UseCompressedOops && data->is_oop()) {
        new_val = tmp->as_pointer_register();
      } else {
        new_val = data->as_register();
      }
      assert_different_registers(dst, new_val);
    }
    __ strex(Rtemp, new_val, addr);

  } else if (data->type() == T_LONG) {
    Register dst_lo = dest->as_register_lo();
    Register new_val_lo = noreg;
    Register dst_hi = dest->as_register_hi();

    assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
    assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");

    __ ldrexd(dst_lo, addr);
    if (code == lir_xadd) {
      Register tmp_lo = tmp->as_register_lo();
      Register tmp_hi = tmp->as_register_hi();

      assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
      assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");

      if (data->is_constant()) {
        jlong c = data->as_constant_ptr()->as_jlong();
        assert((jlong)((jint)c) == c, "overflow");
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
        __ adds(tmp_lo, dst_lo, (jint)c);
        __ adc(tmp_hi, dst_hi, 0);
      } else {
        Register new_val_lo = data->as_register_lo();
        Register new_val_hi = data->as_register_hi();
        __ adds(tmp_lo, dst_lo, new_val_lo);
        __ adc(tmp_hi, dst_hi, new_val_hi);
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
      }
      new_val_lo = tmp_lo;
    } else {
      new_val_lo = data->as_register_lo();
      Register new_val_hi = data->as_register_hi();

      assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
      assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
      assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
    }
    __ strexd(Rtemp, new_val_lo, addr);
  } else {
    ShouldNotReachHere();
  }

  __ cbnz_32(Rtemp, retry);
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);

}

#undef __