/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

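// as_Address_hi()/as_Address_lo() address the two machine words of a long or
// double operand. ARM32 is little-endian here: the low word sits at the base
// displacement and the high word one BytesPerWord above it, so as_Address_hi()
// re-bases the address returned by as_Address() by +BytesPerWord and bails out
// if that pushes the immediate offset out of the ldr/str range.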
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}


int LIR_Assembler::check_icache() {
  Register receiver = LIR_Assembler::receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Ricklass);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

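    // Metadata constants (Klass* values). When patching is requested the klass
    // is materialized via klass2reg_with_patching(), which emits a patchable
    // move that the runtime resolves later.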
    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr),"cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != nullptr) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

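// Register-to-register moves, including transfers between the core and VFP
// register files: fmsr/fmdrr copy 32/64-bit values from core registers into
// single/double VFP registers, while mov_fpr2gpr_float and fmrrd copy them
// back.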
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
    frame_map()->address_for_slot(dest->single_stack_ix()) :
    frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;

#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__

    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
    frame_map()->address_for_slot(src->single_stack_ix()) :
    frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;

#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}

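// emit_op3() handles lir_idiv/lir_irem. A division by a power-of-two constant
// is strength-reduced to shifts: when the dividend is negative it is biased by
// (2^power - 1) (the asr 31 / lsr 32-power sequence below) so that the final
// arithmetic shift rounds toward zero, as Java integer division requires.
// Everything else goes through the shared idiv_irem runtime stub.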
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
  assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert (op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}

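// Primitive conversions. _i2l sign-extends by replicating the sign bit
// (asr 31) into the high word, _i2b/_i2s/_i2c use explicit sign/zero
// extension, and the FP conversions go through the VFP scratch register Stemp;
// _f2i/_d2i use ftosizs/ftosizd, which truncate toward zero.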
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

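// Receiver type profiling. A ReceiverTypeData record holds row_limit()
// (receiver, count) pairs. The first loop below increments the count of the
// row that already matches the observed receiver klass; if none matches, the
// second loop claims the first empty row. mdo_offset_bias is subtracted so
// that large MDO offsets still fit in the ldr/str immediate field.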
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != nullptr,       "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
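// If the object is null, the helper records the null_seen flag in the MDO and
// branches to obj_is_null; otherwise it falls through at the not_null label so
// the caller can continue with the actual type check.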
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != nullptr, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true, if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

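    // checkcast: the object (possibly null) is first copied to the result
    // register, then its klass is compared against the target klass. A fast
    // pointer-equality check or a load through Klass::super_check_offset
    // covers the common cases; anything else falls back to the
    // slow_subtype_check_id runtime stub, whose result in R0 selects the
    // success or failure path.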
    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ?
        &profile_cast_success : &done;

      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ?
        &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

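// Atomic compare-and-swap. The result convention implemented below is
// dest = 1 on success and dest = 0 on failure. 32-bit and oop CAS go through
// atomic_cas_bool with Rtemp as scratch; 64-bit CAS uses atomic_cas64, which
// needs the even/odd aligned register pairs checked by the asserts below.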
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  //   if (*addr == cmpval) {
  //     *addr = newval;
  //     dest = 1;
  //   } else {
  //     dest = 0;
  //   }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_Opr::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) {                         // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == nullptr, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;

    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default:
          ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(), "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(), "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
orr_32(res, lreg, rreg); break; 1680 case lir_logic_xor: __ eor_32(res, lreg, rreg); break; 1681 default: ShouldNotReachHere(); 1682 } 1683 } else { 1684 assert(right->is_constant(), "must be"); 1685 const uint c = (uint)right->as_constant_ptr()->as_jint(); 1686 if (!Assembler::is_arith_imm_in_range(c)) { 1687 BAILOUT("illegal arithmetic operand"); 1688 } 1689 switch (code) { 1690 case lir_logic_and: __ and_32(res, lreg, c); break; 1691 case lir_logic_or: __ orr_32(res, lreg, c); break; 1692 case lir_logic_xor: __ eor_32(res, lreg, c); break; 1693 default: ShouldNotReachHere(); 1694 } 1695 } 1696 } else { 1697 assert(dest->is_double_cpu(), "should be"); 1698 Register res_lo = dest->as_register_lo(); 1699 1700 assert (dest->type() == T_LONG, "unexpected result type"); 1701 assert (left->type() == T_LONG, "unexpected left type"); 1702 assert (right->type() == T_LONG, "unexpected right type"); 1703 1704 const Register res_hi = dest->as_register_hi(); 1705 const Register lreg_lo = left->as_register_lo(); 1706 const Register lreg_hi = left->as_register_hi(); 1707 1708 if (right->is_register()) { 1709 const Register rreg_lo = right->as_register_lo(); 1710 const Register rreg_hi = right->as_register_hi(); 1711 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1712 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input 1713 } 1714 switch (code) { 1715 case lir_logic_and: 1716 __ andr(res_lo, lreg_lo, rreg_lo); 1717 __ andr(res_hi, lreg_hi, rreg_hi); 1718 break; 1719 case lir_logic_or: 1720 __ orr(res_lo, lreg_lo, rreg_lo); 1721 __ orr(res_hi, lreg_hi, rreg_hi); 1722 break; 1723 case lir_logic_xor: 1724 __ eor(res_lo, lreg_lo, rreg_lo); 1725 __ eor(res_hi, lreg_hi, rreg_hi); 1726 break; 1727 default: 1728 ShouldNotReachHere(); 1729 } 1730 move_regs(res_lo, dest->as_register_lo()); 1731 } else { 1732 assert(right->is_constant(), "must be"); 1733 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong(); 1734 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32); 1735 // Case for logic_or from do_ClassIDIntrinsic() 1736 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) { 1737 switch (code) { 1738 case lir_logic_and: 1739 __ andr(res_lo, lreg_lo, c_lo); 1740 __ mov(res_hi, 0); 1741 break; 1742 case lir_logic_or: 1743 __ orr(res_lo, lreg_lo, c_lo); 1744 break; 1745 case lir_logic_xor: 1746 __ eor(res_lo, lreg_lo, c_lo); 1747 break; 1748 default: 1749 ShouldNotReachHere(); 1750 } 1751 } else if (code == lir_logic_and && 1752 c_hi == -1 && 1753 (AsmOperand::is_rotated_imm(c_lo) || 1754 AsmOperand::is_rotated_imm(~c_lo))) { 1755 // Another case which handles logic_and from do_ClassIDIntrinsic() 1756 if (AsmOperand::is_rotated_imm(c_lo)) { 1757 __ andr(res_lo, lreg_lo, c_lo); 1758 } else { 1759 __ bic(res_lo, lreg_lo, ~c_lo); 1760 } 1761 if (res_hi != lreg_hi) { 1762 __ mov(res_hi, lreg_hi); 1763 } 1764 } else { 1765 BAILOUT("64 bit constant cannot be inlined"); 1766 } 1767 } 1768 } 1769 } 1770 1771 1772 1773 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1774 if (opr1->is_single_cpu()) { 1775 if (opr2->is_constant()) { 1776 switch (opr2->as_constant_ptr()->type()) { 1777 case T_INT: { 1778 const jint c = opr2->as_constant_ptr()->as_jint(); 1779 if (Assembler::is_arith_imm_in_range(c)) { 1780 __ cmp_32(opr1->as_register(), c); 1781 } else if (Assembler::is_arith_imm_in_range(-c)) { 1782 __ cmn_32(opr1->as_register(), -c); 1783 } else { 1784 // This can happen when compiling lookupswitch 1785 __ mov_slow(Rtemp, c); 
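            // The constant fits neither cmp nor cmn as an immediate, so compare
            // against the value just materialized in Rtemp.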
            __ cmp_32(opr1->as_register(), Rtemp);
          }
          break;
        }
        case T_OBJECT:
          assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        case T_METADATA:
          assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
          assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        default:
          ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
        __ cmpoop(opr1->as_register(), opr2->as_register());
      } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        __ subs(Rtemp, xlo, ylo);
        __ sbcs(Rtemp, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    __ fmstat();
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else {                     // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}


void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer, so
  // mov_metadata can't be used.
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // Without movw/movt we must load a PC-relative value, but there is no
    // relocation and hence no metadata table to load from.
    // Use a b instruction rather than a bl, place the constant(s) inline after
    // the branch, load them with a PC-relative ldr, and arrange for the call
    // to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
    if (!use_movw) {
      __ bind_literal(value);
    }
    __ bind(call_return);
  }
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    BAILOUT("static call stub overflow");
  }

  DEBUG_ONLY(int offset = code_offset();)

  InlinedMetadata metadata_literal(nullptr);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // If not a single instruction, NativeMovConstReg::next_instruction_address()
  // must jump over the whole following ldr_literal.
  // (See CompiledStaticCall::set_to_interpreted())
#ifdef ASSERT
  address ldr_site = __ pc();
#endif
  __ ldr_literal(Rmethod, metadata_literal);
  assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  if (near_range) {
    address branch_site = __ pc();
    __ b(branch_site); // b to self maps to special NativeJump -1 destination
  } else {
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(code_offset() - offset <= call_stub_size(), "overflow");
  __ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
1983 Runtime1::handle_exception_id : 1984 Runtime1::handle_exception_nofpu_id; 1985 Label return_address; 1986 __ adr(Rexception_pc, return_address); 1987 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); 1988 __ bind(return_address); 1989 add_call_info_here(info); // for exception handler 1990 } 1991 1992 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1993 assert(exceptionOop->as_register() == Rexception_obj, "must match"); 1994 __ b(_unwind_handler_entry); 1995 } 1996 1997 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 1998 AsmShift shift = lsl; 1999 switch (code) { 2000 case lir_shl: shift = lsl; break; 2001 case lir_shr: shift = asr; break; 2002 case lir_ushr: shift = lsr; break; 2003 default: ShouldNotReachHere(); 2004 } 2005 2006 if (dest->is_single_cpu()) { 2007 __ andr(Rtemp, count->as_register(), 31); 2008 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp)); 2009 } else if (dest->is_double_cpu()) { 2010 Register dest_lo = dest->as_register_lo(); 2011 Register dest_hi = dest->as_register_hi(); 2012 Register src_lo = left->as_register_lo(); 2013 Register src_hi = left->as_register_hi(); 2014 Register Rcount = count->as_register(); 2015 // Resolve possible register conflicts 2016 if (shift == lsl && dest_hi == src_lo) { 2017 dest_hi = Rtemp; 2018 } else if (shift != lsl && dest_lo == src_hi) { 2019 dest_lo = Rtemp; 2020 } else if (dest_lo == src_lo && dest_hi == src_hi) { 2021 dest_lo = Rtemp; 2022 } else if (dest_lo == Rcount || dest_hi == Rcount) { 2023 Rcount = Rtemp; 2024 } 2025 __ andr(Rcount, count->as_register(), 63); 2026 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount); 2027 move_regs(dest_lo, dest->as_register_lo()); 2028 move_regs(dest_hi, dest->as_register_hi()); 2029 } else { 2030 ShouldNotReachHere(); 2031 } 2032 } 2033 2034 2035 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2036 AsmShift shift = lsl; 2037 switch (code) { 2038 case lir_shl: shift = lsl; break; 2039 case lir_shr: shift = asr; break; 2040 case lir_ushr: shift = lsr; break; 2041 default: ShouldNotReachHere(); 2042 } 2043 2044 if (dest->is_single_cpu()) { 2045 count &= 31; 2046 if (count != 0) { 2047 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count)); 2048 } else { 2049 move_regs(left->as_register(), dest->as_register()); 2050 } 2051 } else if (dest->is_double_cpu()) { 2052 count &= 63; 2053 if (count != 0) { 2054 Register dest_lo = dest->as_register_lo(); 2055 Register dest_hi = dest->as_register_hi(); 2056 Register src_lo = left->as_register_lo(); 2057 Register src_hi = left->as_register_hi(); 2058 // Resolve possible register conflicts 2059 if (shift == lsl && dest_hi == src_lo) { 2060 dest_hi = Rtemp; 2061 } else if (shift != lsl && dest_lo == src_hi) { 2062 dest_lo = Rtemp; 2063 } 2064 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count); 2065 move_regs(dest_lo, dest->as_register_lo()); 2066 move_regs(dest_hi, dest->as_register_hi()); 2067 } else { 2068 __ long_move(dest->as_register_lo(), dest->as_register_hi(), 2069 left->as_register_lo(), left->as_register_hi()); 2070 } 2071 } else { 2072 ShouldNotReachHere(); 2073 } 2074 } 2075 2076 2077 // Saves 4 given registers in reserved argument area. 
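// The four registers are stored ascending from SP, i.e. into the first words of the
// reserved argument area also used by store_parameter() above.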
2078 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2079 verify_reserved_argument_area_size(4); 2080 __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4)); 2081 } 2082 2083 // Restores 4 given registers from reserved argument area. 2084 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2085 __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback); 2086 } 2087 2088 2089 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2090 ciArrayKlass* default_type = op->expected_type(); 2091 Register src = op->src()->as_register(); 2092 Register src_pos = op->src_pos()->as_register(); 2093 Register dst = op->dst()->as_register(); 2094 Register dst_pos = op->dst_pos()->as_register(); 2095 Register length = op->length()->as_register(); 2096 Register tmp = op->tmp()->as_register(); 2097 Register tmp2 = Rtemp; 2098 2099 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption"); 2100 2101 CodeStub* stub = op->stub(); 2102 2103 int flags = op->flags(); 2104 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 2105 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2106 2107 // If we don't know anything or it's an object array, just go through the generic arraycopy 2108 if (default_type == nullptr) { 2109 2110 // save arguments, because they will be killed by a runtime call 2111 save_in_reserved_area(R0, R1, R2, R3); 2112 2113 // pass length argument on SP[0] 2114 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment 2115 2116 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2117 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 2118 #ifndef PRODUCT 2119 if (PrintC1Statistics) { 2120 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); 2121 } 2122 #endif // !PRODUCT 2123 // the stub is in the code cache so close enough 2124 __ call(copyfunc_addr, relocInfo::runtime_call_type); 2125 2126 __ add(SP, SP, 2*wordSize); 2127 2128 __ cbz_32(R0, *stub->continuation()); 2129 2130 __ mvn_32(tmp, R0); 2131 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only 2132 __ sub_32(length, length, tmp); 2133 __ add_32(src_pos, src_pos, tmp); 2134 __ add_32(dst_pos, dst_pos, tmp); 2135 2136 __ b(*stub->entry()); 2137 2138 __ bind(*stub->continuation()); 2139 return; 2140 } 2141 2142 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), 2143 "must be true at this point"); 2144 int elem_size = type2aelembytes(basic_type); 2145 int shift = exact_log2(elem_size); 2146 2147 // Check for null 2148 if (flags & LIR_OpArrayCopy::src_null_check) { 2149 if (flags & LIR_OpArrayCopy::dst_null_check) { 2150 __ cmp(src, 0); 2151 __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed 2152 __ b(*stub->entry(), eq); 2153 } else { 2154 __ cbz(src, *stub->entry()); 2155 } 2156 } else if (flags & LIR_OpArrayCopy::dst_null_check) { 2157 __ cbz(dst, *stub->entry()); 2158 } 2159 2160 // If the compiler was not able to prove that exact type of the source or the destination 2161 // of the arraycopy is an array type, check at runtime if the source or the destination is 2162 // an instance type. 
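  // An array klass has a negative layout helper value, so anything >= Klass::_lh_neutral_value
  // identifies an instance klass and forces the slow path.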
2163 if (flags & LIR_OpArrayCopy::type_check) { 2164 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2165 __ load_klass(tmp, dst); 2166 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2167 __ mov_slow(tmp, Klass::_lh_neutral_value); 2168 __ cmp_32(tmp2, tmp); 2169 __ b(*stub->entry(), ge); 2170 } 2171 2172 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2173 __ load_klass(tmp, src); 2174 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2175 __ mov_slow(tmp, Klass::_lh_neutral_value); 2176 __ cmp_32(tmp2, tmp); 2177 __ b(*stub->entry(), ge); 2178 } 2179 } 2180 2181 // Check if negative 2182 const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check | 2183 LIR_OpArrayCopy::dst_pos_positive_check | 2184 LIR_OpArrayCopy::length_positive_check; 2185 switch (flags & all_positive_checks) { 2186 case LIR_OpArrayCopy::src_pos_positive_check: 2187 __ branch_if_negative_32(src_pos, *stub->entry()); 2188 break; 2189 case LIR_OpArrayCopy::dst_pos_positive_check: 2190 __ branch_if_negative_32(dst_pos, *stub->entry()); 2191 break; 2192 case LIR_OpArrayCopy::length_positive_check: 2193 __ branch_if_negative_32(length, *stub->entry()); 2194 break; 2195 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check: 2196 __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry()); 2197 break; 2198 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2199 __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry()); 2200 break; 2201 case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2202 __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry()); 2203 break; 2204 case all_positive_checks: 2205 __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry()); 2206 break; 2207 default: 2208 assert((flags & all_positive_checks) == 0, "the last option"); 2209 } 2210 2211 // Range checks 2212 if (flags & LIR_OpArrayCopy::src_range_check) { 2213 __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes())); 2214 __ add_32(tmp, src_pos, length); 2215 __ cmp_32(tmp, tmp2); 2216 __ b(*stub->entry(), hi); 2217 } 2218 if (flags & LIR_OpArrayCopy::dst_range_check) { 2219 __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2220 __ add_32(tmp, dst_pos, length); 2221 __ cmp_32(tmp, tmp2); 2222 __ b(*stub->entry(), hi); 2223 } 2224 2225 // Check if src and dst are of the same type 2226 if (flags & LIR_OpArrayCopy::type_check) { 2227 // We don't know the array types are compatible 2228 if (basic_type != T_OBJECT) { 2229 // Simple test for basic type arrays 2230 if (UseCompressedClassPointers) { 2231 // We don't need decode because we just need to compare 2232 __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes())); 2233 __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); 2234 __ cmp_32(tmp, tmp2); 2235 } else { 2236 __ load_klass(tmp, src); 2237 __ load_klass(tmp2, dst); 2238 __ cmp(tmp, tmp2); 2239 } 2240 __ b(*stub->entry(), ne); 2241 } else { 2242 // For object arrays, if src is a sub class of dst then we can 2243 // safely do the copy. 
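      // If the fast subtype check succeeds we can use the plain arraycopy stub below;
      // otherwise fall back to the element-wise checkcast stub (when available) or to the slow path.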
      Label cont, slow;

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      __ load_klass(tmp, src);
      __ load_klass(tmp2, dst);

      // We are at a call so all live registers are saved before we
      // get here
      assert_different_registers(tmp, tmp2, R6, altFP_7_11);

      __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);

      __ mov(R6, R0);
      __ mov(altFP_7_11, R1);
      __ mov(R0, tmp);
      __ mov(R1, tmp2);
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
      __ cmp_32(R0, 0);
      __ mov(R0, R6);
      __ mov(R1, altFP_7_11);

      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        __ b(cont, ne);

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of src/dst is known to be an object array; check at runtime
          // that the other one is an object array too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ ldr_u32(tmp2, Address(tmp, lh_offset));

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ mov_slow(tmp, objArray_lh);
          __ cmp_32(tmp, tmp2);
          __ b(*stub->entry(), ne);
        }

        save_in_reserved_area(R0, R1, R2, R3);

        Register src_ptr = R0;
        Register dst_ptr = R1;
        Register len = R2;
        Register chk_off = R3;
        Register super_k = tmp;

        __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

        __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
        __ load_klass(tmp, dst);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        int sco_offset = in_bytes(Klass::super_check_offset_offset());

        __ ldr(super_k, Address(tmp, ek_offset));

        __ mov(len, length);
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
        __ push(super_k);

        __ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz_32(R0, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
          __ bind(failed);
        }
#endif // PRODUCT

        __ add(SP, SP, wordSize); // Drop super_k argument

        __ cbz_32(R0, *stub->continuation());
        __ mvn_32(tmp, R0);

        // load saved arguments in slow case only
        restore_from_reserved_area(R0, R1, R2, R3);

        __ sub_32(length, length, tmp);
        __ add_32(src_pos, src_pos, tmp);
        __ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
        }
#endif

        __ b(*stub->entry());

        __ bind(cont);
      } else {
        __ b(*stub->entry(), eq);
        __ bind(cont);
      }
    }
  }

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter =
Runtime1::arraycopy_count_address(basic_type); 2360 __ inc_counter(counter, tmp, tmp2); 2361 } 2362 #endif // !PRODUCT 2363 2364 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2365 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2366 const char *name; 2367 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2368 2369 Register src_ptr = R0; 2370 Register dst_ptr = R1; 2371 Register len = R2; 2372 2373 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2374 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift); 2375 2376 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2377 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift); 2378 2379 __ mov(len, length); 2380 2381 __ call(entry, relocInfo::runtime_call_type); 2382 2383 __ bind(*stub->continuation()); 2384 } 2385 2386 #ifdef ASSERT 2387 // emit run-time assertion 2388 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2389 assert(op->code() == lir_assert, "must be"); 2390 2391 if (op->in_opr1()->is_valid()) { 2392 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2393 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2394 } else { 2395 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2396 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2397 } 2398 2399 Label ok; 2400 if (op->condition() != lir_cond_always) { 2401 AsmCondition acond = al; 2402 switch (op->condition()) { 2403 case lir_cond_equal: acond = eq; break; 2404 case lir_cond_notEqual: acond = ne; break; 2405 case lir_cond_less: acond = lt; break; 2406 case lir_cond_lessEqual: acond = le; break; 2407 case lir_cond_greaterEqual: acond = ge; break; 2408 case lir_cond_greater: acond = gt; break; 2409 case lir_cond_aboveEqual: acond = hs; break; 2410 case lir_cond_belowEqual: acond = ls; break; 2411 default: ShouldNotReachHere(); 2412 } 2413 __ b(ok, acond); 2414 } 2415 if (op->halt()) { 2416 const char* str = __ code_string(op->msg()); 2417 __ stop(str); 2418 } else { 2419 breakpoint(); 2420 } 2421 __ bind(ok); 2422 } 2423 #endif // ASSERT 2424 2425 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2426 fatal("CRC32 intrinsic is not implemented on this platform"); 2427 } 2428 2429 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2430 Register obj = op->obj_opr()->as_pointer_register(); 2431 Register hdr = op->hdr_opr()->as_pointer_register(); 2432 Register lock = op->lock_opr()->as_pointer_register(); 2433 2434 if (LockingMode == LM_MONITOR) { 2435 if (op->info() != nullptr) { 2436 add_debug_info_for_null_check_here(op->info()); 2437 __ null_check(obj); 2438 } 2439 __ b(*op->stub()->entry()); 2440 } else if (op->code() == lir_lock) { 2441 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2442 int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2443 if (op->info() != nullptr) { 2444 add_debug_info_for_null_check(null_check_offset, op->info()); 2445 } 2446 } else if (op->code() == lir_unlock) { 2447 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2448 } else { 2449 ShouldNotReachHere(); 2450 } 2451 __ bind(*op->stub()->continuation()); 2452 } 2453 2454 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2455 Register obj = op->obj()->as_pointer_register(); 2456 Register result = op->result_opr()->as_pointer_register(); 2457 2458 CodeEmitInfo* info = op->info(); 2459 if (info != nullptr) { 
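    // Record debug info here: the klass load below acts as the implicit null check for obj.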
2460 add_debug_info_for_null_check_here(info); 2461 } 2462 2463 if (UseCompressedClassPointers) { // On 32 bit arm?? 2464 __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2465 } else { 2466 __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2467 } 2468 } 2469 2470 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2471 ciMethod* method = op->profiled_method(); 2472 int bci = op->profiled_bci(); 2473 ciMethod* callee = op->profiled_callee(); 2474 2475 // Update counter for all call types 2476 ciMethodData* md = method->method_data_or_null(); 2477 assert(md != nullptr, "Sanity"); 2478 ciProfileData* data = md->bci_to_data(bci); 2479 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2480 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2481 Register mdo = op->mdo()->as_register(); 2482 assert(op->tmp1()->is_register(), "tmp1 must be allocated"); 2483 Register tmp1 = op->tmp1()->as_pointer_register(); 2484 assert_different_registers(mdo, tmp1); 2485 __ mov_metadata(mdo, md->constant_encoding()); 2486 int mdo_offset_bias = 0; 2487 int max_offset = 4096; 2488 if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) { 2489 // The offset is large so bias the mdo by the base of the slot so 2490 // that the ldr can use an immediate offset to reference the slots of the data 2491 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2492 __ mov_slow(tmp1, mdo_offset_bias); 2493 __ add(mdo, mdo, tmp1); 2494 } 2495 2496 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2497 // Perform additional virtual call profiling for invokevirtual and 2498 // invokeinterface bytecodes 2499 if (op->should_profile_receiver_type()) { 2500 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2501 Register recv = op->recv()->as_register(); 2502 assert_different_registers(mdo, tmp1, recv); 2503 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2504 ciKlass* known_klass = op->known_holder(); 2505 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2506 // We know the type that will be seen at this call site; we can 2507 // statically update the MethodData* rather than needing to do 2508 // dynamic tests on the receiver type 2509 2510 // NOTE: we should probably put a lock around this search to 2511 // avoid collisions by concurrent compilations 2512 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2513 uint i; 2514 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2515 ciKlass* receiver = vc_data->receiver(i); 2516 if (known_klass->equals(receiver)) { 2517 Address data_addr(mdo, md->byte_offset_of_slot(data, 2518 VirtualCallData::receiver_count_offset(i)) - 2519 mdo_offset_bias); 2520 __ ldr(tmp1, data_addr); 2521 __ add(tmp1, tmp1, DataLayout::counter_increment); 2522 __ str(tmp1, data_addr); 2523 return; 2524 } 2525 } 2526 2527 // Receiver type not found in profile data; select an empty slot 2528 2529 // Note that this is less efficient than it should be because it 2530 // always does a write to the receiver part of the 2531 // VirtualCallData rather than just the first time 2532 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2533 ciKlass* receiver = vc_data->receiver(i); 2534 if (receiver == nullptr) { 2535 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 2536 mdo_offset_bias); 2537 __ mov_metadata(tmp1, 
known_klass->constant_encoding()); 2538 __ str(tmp1, recv_addr); 2539 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 2540 mdo_offset_bias); 2541 __ ldr(tmp1, data_addr); 2542 __ add(tmp1, tmp1, DataLayout::counter_increment); 2543 __ str(tmp1, data_addr); 2544 return; 2545 } 2546 } 2547 } else { 2548 __ load_klass(recv, recv); 2549 Label update_done; 2550 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2551 // Receiver did not match any saved receiver and there is no empty row for it. 2552 // Increment total counter to indicate polymorphic case. 2553 __ ldr(tmp1, counter_addr); 2554 __ add(tmp1, tmp1, DataLayout::counter_increment); 2555 __ str(tmp1, counter_addr); 2556 2557 __ bind(update_done); 2558 } 2559 } else { 2560 // Static call 2561 __ ldr(tmp1, counter_addr); 2562 __ add(tmp1, tmp1, DataLayout::counter_increment); 2563 __ str(tmp1, counter_addr); 2564 } 2565 } 2566 2567 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2568 fatal("Type profiling not implemented on this platform"); 2569 } 2570 2571 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2572 Unimplemented(); 2573 } 2574 2575 2576 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2577 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2578 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp()); 2579 } 2580 2581 2582 void LIR_Assembler::align_backward_branch_target() { 2583 // Some ARM processors do better with 8-byte branch target alignment 2584 __ align(8); 2585 } 2586 2587 2588 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2589 // tmp must be unused 2590 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2591 2592 if (left->is_single_cpu()) { 2593 assert (dest->type() == T_INT, "unexpected result type"); 2594 assert (left->type() == T_INT, "unexpected left type"); 2595 __ neg_32(dest->as_register(), left->as_register()); 2596 } else if (left->is_double_cpu()) { 2597 Register dest_lo = dest->as_register_lo(); 2598 Register dest_hi = dest->as_register_hi(); 2599 Register src_lo = left->as_register_lo(); 2600 Register src_hi = left->as_register_hi(); 2601 if (dest_lo == src_hi) { 2602 dest_lo = Rtemp; 2603 } 2604 __ rsbs(dest_lo, src_lo, 0); 2605 __ rsc(dest_hi, src_hi, 0); 2606 move_regs(dest_lo, dest->as_register_lo()); 2607 } else if (left->is_single_fpu()) { 2608 __ neg_float(dest->as_float_reg(), left->as_float_reg()); 2609 } else if (left->is_double_fpu()) { 2610 __ neg_double(dest->as_double_reg(), left->as_double_reg()); 2611 } else { 2612 ShouldNotReachHere(); 2613 } 2614 } 2615 2616 2617 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2618 assert(patch_code == lir_patch_none, "Patch code not supported"); 2619 LIR_Address* addr = addr_opr->as_address_ptr(); 2620 if (addr->index()->is_illegal()) { 2621 jint c = addr->disp(); 2622 if (!Assembler::is_arith_imm_in_range(c)) { 2623 BAILOUT("illegal arithmetic operand"); 2624 } 2625 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c); 2626 } else { 2627 assert(addr->disp() == 0, "cannot handle otherwise"); 2628 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), 2629 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale())); 2630 } 2631 } 2632 2633 2634 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 
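  // The arguments are assumed to already be in place (the args list is not inspected here),
  // which is why no temporary register is needed.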
2635 assert(!tmp->is_valid(), "don't need temporary"); 2636 __ call(dest); 2637 if (info != nullptr) { 2638 add_call_info_here(info); 2639 } 2640 } 2641 2642 2643 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2644 assert(src->is_double_cpu() && dest->is_address() || 2645 src->is_address() && dest->is_double_cpu(), 2646 "Simple move_op is called for all other cases"); 2647 2648 int null_check_offset; 2649 if (dest->is_address()) { 2650 // Store 2651 const LIR_Address* addr = dest->as_address_ptr(); 2652 const Register src_lo = src->as_register_lo(); 2653 const Register src_hi = src->as_register_hi(); 2654 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2655 2656 if (src_lo < src_hi) { 2657 null_check_offset = __ offset(); 2658 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi)); 2659 } else { 2660 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2661 __ mov(Rtemp, src_hi); 2662 null_check_offset = __ offset(); 2663 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp)); 2664 } 2665 } else { 2666 // Load 2667 const LIR_Address* addr = src->as_address_ptr(); 2668 const Register dest_lo = dest->as_register_lo(); 2669 const Register dest_hi = dest->as_register_hi(); 2670 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2671 2672 null_check_offset = __ offset(); 2673 if (dest_lo < dest_hi) { 2674 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi)); 2675 } else { 2676 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2677 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp)); 2678 __ mov(dest_hi, Rtemp); 2679 } 2680 } 2681 2682 if (info != nullptr) { 2683 add_debug_info_for_null_check(null_check_offset, info); 2684 } 2685 } 2686 2687 2688 void LIR_Assembler::membar() { 2689 __ membar(MacroAssembler::StoreLoad, Rtemp); 2690 } 2691 2692 void LIR_Assembler::membar_acquire() { 2693 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 2694 } 2695 2696 void LIR_Assembler::membar_release() { 2697 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 2698 } 2699 2700 void LIR_Assembler::membar_loadload() { 2701 __ membar(MacroAssembler::LoadLoad, Rtemp); 2702 } 2703 2704 void LIR_Assembler::membar_storestore() { 2705 __ membar(MacroAssembler::StoreStore, Rtemp); 2706 } 2707 2708 void LIR_Assembler::membar_loadstore() { 2709 __ membar(MacroAssembler::LoadStore, Rtemp); 2710 } 2711 2712 void LIR_Assembler::membar_storeload() { 2713 __ membar(MacroAssembler::StoreLoad, Rtemp); 2714 } 2715 2716 void LIR_Assembler::on_spin_wait() { 2717 Unimplemented(); 2718 } 2719 2720 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2721 // Not used on ARM 2722 Unimplemented(); 2723 } 2724 2725 void LIR_Assembler::peephole(LIR_List* lir) { 2726 LIR_OpList* inst = lir->instructions_list(); 2727 const int inst_length = inst->length(); 2728 for (int i = 0; i < inst_length; i++) { 2729 LIR_Op* op = inst->at(i); 2730 switch (op->code()) { 2731 case lir_cmp: { 2732 // Replace: 2733 // cmp rX, y 2734 // cmove [EQ] y, z, rX 2735 // with 2736 // cmp rX, y 2737 // cmove [EQ] illegalOpr, z, rX 2738 // 2739 // or 2740 // cmp rX, y 2741 // cmove [NE] z, y, rX 2742 // with 2743 // cmp rX, y 2744 // cmove [NE] z, illegalOpr, rX 2745 
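        // (The dropped operand would only be moved when the condition guarantees that the
        // result register already holds its value, so the move is a no-op.)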
        //
        // moves from illegalOpr should be removed when converting LIR to native assembly

        LIR_Op2* cmp = op->as_Op2();
        assert(cmp != nullptr, "cmp LIR instruction is not an op2");

        if (i + 1 < inst_length) {
          LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
          if (cmove != nullptr && cmove->code() == lir_cmove) {
            LIR_Opr cmove_res = cmove->result_opr();
            bool res_is_op1 = cmove_res == cmp->in_opr1();
            bool res_is_op2 = cmove_res == cmp->in_opr2();
            LIR_Opr cmp_res, cmp_arg;
            if (res_is_op1) {
              cmp_res = cmp->in_opr1();
              cmp_arg = cmp->in_opr2();
            } else if (res_is_op2) {
              cmp_res = cmp->in_opr2();
              cmp_arg = cmp->in_opr1();
            } else {
              cmp_res = LIR_OprFact::illegalOpr;
              cmp_arg = LIR_OprFact::illegalOpr;
            }

            if (cmp_res != LIR_OprFact::illegalOpr) {
              LIR_Condition cond = cmove->condition();
              if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
                cmove->set_in_opr1(LIR_OprFact::illegalOpr);
              } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
                cmove->set_in_opr2(LIR_OprFact::illegalOpr);
              }
            }
          }
        }
        break;
      }

      default:
        break;
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(src->is_address(), "sanity");
  Address addr = as_Address(src->as_address_ptr());

  // lir_xchg may operate on an oop; for the other codes (lir_xadd) the value must not be an oop.
  if (code == lir_xchg) {
  } else {
    assert (!data->is_oop(), "xadd for oops");
  }

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

  Label retry;
  // Entry of the ldrex/strex retry loop, shared by the int/oop and long paths below.
  __ bind(retry);

  if (data->type() == T_INT || data->is_oop()) {
    Register dst = dest->as_register();
    Register new_val = noreg;
    __ ldrex(dst, addr);
    if (code == lir_xadd) {
      Register tmp_reg = tmp->as_register();
      if (data->is_constant()) {
        assert_different_registers(dst, tmp_reg);
        __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
      } else {
        assert_different_registers(dst, tmp_reg, data->as_register());
        __ add_32(tmp_reg, dst, data->as_register());
      }
      new_val = tmp_reg;
    } else {
      if (UseCompressedOops && data->is_oop()) {
        new_val = tmp->as_pointer_register();
      } else {
        new_val = data->as_register();
      }
      assert_different_registers(dst, new_val);
    }
    __ strex(Rtemp, new_val, addr);

  } else if (data->type() == T_LONG) {
    Register dst_lo = dest->as_register_lo();
    Register new_val_lo = noreg;
    Register dst_hi = dest->as_register_hi();

    // ldrexd/strexd require an even/odd register pair.
    assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
    assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");

    __ ldrexd(dst_lo, addr);
    if (code == lir_xadd) {
      Register tmp_lo = tmp->as_register_lo();
      Register tmp_hi = tmp->as_register_hi();

      assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
      assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");

      if (data->is_constant()) {
        jlong c = data->as_constant_ptr()->as_jlong();
        assert((jlong)((jint)c) == c, "overflow");
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
        __ adds(tmp_lo, dst_lo, (jint)c);
        __ adc(tmp_hi, dst_hi, 0);
      } else {
        Register new_val_lo = data->as_register_lo();
        Register new_val_hi =
data->as_register_hi(); 2852 __ adds(tmp_lo, dst_lo, new_val_lo); 2853 __ adc(tmp_hi, dst_hi, new_val_hi); 2854 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi); 2855 } 2856 new_val_lo = tmp_lo; 2857 } else { 2858 new_val_lo = data->as_register_lo(); 2859 Register new_val_hi = data->as_register_hi(); 2860 2861 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi); 2862 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair"); 2863 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair"); 2864 } 2865 __ strexd(Rtemp, new_val_lo, addr); 2866 } else { 2867 ShouldNotReachHere(); 2868 } 2869 2870 __ cbnz_32(Rtemp, retry); 2871 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp); 2872 2873 } 2874 2875 #undef __