/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
  }
}
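
// Illustrative note on the +/-4096 bailout above: an ARM word ldr/str
// encodes a 12-bit immediate offset, so only displacements in the open
// range (-4096, 4096) fit into a single instruction. A larger
// displacement has to be materialized first, e.g. with the pattern used
// elsewhere in this file (`big_offset` is a placeholder value):
//   __ mov_slow(Rtemp, big_offset);
//   __ add(Rtemp, base, Rtemp);
//   __ ldr(dest, Address(Rtemp));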

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) {
    BAILOUT_("offset not in range", Address(base.base(), 0));
  }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}
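
// Worked example of the slot arithmetic above (illustrative numbers
// only): with max_locals == 2 and number_of_locks == 2, monitor_offset
// is (2 + 2*1) * 4 = 16 bytes. Monitor 0 is copied from OSR_buf offsets
// 16/20 (lock word, then object), monitor 1 from offsets 8/12; i.e. the
// monitors sit below the locals in the OSR buffer, highest offset first.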

int LIR_Assembler::check_icache() {
  Register receiver = LIR_Assembler::receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Ricklass);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // helps pc_desc_at find the correct scope for the current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}
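
// Illustrative example for the T_LONG/T_DOUBLE case above: storing a
// constant whose two words happen to be equal, e.g. the jlong
// 0x0000000500000005 or the double 0.0, needs only one mov_slow, since
// Rtemp already holds the right bit pattern for both word stores.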

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr),"cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != nullptr) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
                   frame_map()->address_for_slot(dest->single_stack_ix()) :
                   frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}
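
// Background for the 1024 bailout above and in stack2reg below (an
// informal note): VFP loads and stores encode their immediate offset as
// an 8-bit word count, so a single flds/fldd/fsts/fstd can only reach
// displacements up to 4 * 255 = 1020 bytes from the base register.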

void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
                   frame_map()->address_for_slot(src->single_stack_ix()) :
                   frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}

void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}
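
// A note on the three orderings in the T_LONG cases of mem2reg above
// and reg2mem earlier (informal): for the two word loads, if
// base_reg == to_lo then loading the lo word first would clobber the
// base register before the hi word is loaded, so the hi word goes
// first; otherwise lo-then-hi is used. The store path mirrors the same
// structure, and each half gets its own PatchingStub because the two
// ldr/str instructions are patched independently (lir_patch_low vs.
// lir_patch_high).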

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32 - power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}
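
// Worked example of the power-of-2 division above (illustrative only),
// for left = -7, c = 4 (power = 2):
//   asr_32(Rtemp, left, 31)   -> Rtemp = 0xFFFFFFFF (sign mask)
//   Rtemp lsr (32 - 2)        -> 3, i.e. 2^power - 1
//   add_32(dest, left, ...)   -> dest = -7 + 3 = -4
//   asr_32(dest, dest, 2)     -> dest = -1
// which matches Java's truncating division (-7 / 4 == -1); a plain
// arithmetic shift alone would round toward minus infinity and give -2.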

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
  assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert(op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}
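
// Informal sketch of what type_profile_helper emits, in pseudo-Java:
//   for each row i:                       // first pass: known receivers
//     if (mdo.receiver[i] == recv) { mdo.count[i] += increment; goto update_done; }
//   for each row i:                       // second pass: claim a free row
//     if (mdo.receiver[i] == null) { mdo.receiver[i] = recv;
//                                    mdo.count[i] = increment; goto update_done; }
// If every row is already occupied by other receivers, control simply
// falls through without recording this receiver.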

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != nullptr,             "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}
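
// Worked example of the bias above (illustrative numbers): if the
// profile slot sits at byte offset 5000 within the MDO, a plain
// ldr(tmp1, Address(mdo, 5000)) cannot be encoded (12-bit immediate,
// see as_Address). Instead the mdo register is pre-biased by 5000, and
// all slot accesses then use small immediates relative to that base.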

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != nullptr, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to 1 if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more efficient with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;


      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
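
// Informal recap of the subtype check protocol shared by the three
// cases above: Rtemp is loaded with the target klass's
// super_check_offset; a klass K is a subtype of S if either K == S or
// the word at [K + super_check_offset] equals S ("immediate positive
// hit"). When super_check_offset points at the secondary_super_cache
// slot, a miss is not conclusive and the
// Runtime1::slow_subtype_check_id stub (sub klass in R0, super klass
// in R1, nonzero result in R0 on success) makes the final decision.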

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  //   if (*addr == cmpval) {
  //     *addr = newval;
  //     dest = 1;
  //   } else {
  //     dest = 0;
  //   }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_Opr::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}
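
// Note on the fences above (informal reading of the code): the leading
// StoreStore|LoadStore barrier gives the CAS release semantics (prior
// accesses cannot sink below it), and the trailing barrier keeps later
// accesses from floating above the update. The even/odd register-pair
// asserts in the lir_cas_long path reflect the LDREXD/STREXD
// requirement that 64-bit operands live in a consecutive pair starting
// at an even-numbered register (e.g. R2/R3).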

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) {                         // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}
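
// Illustrative trace of the two-pass loop above, e.g. for
// cmove(lir_cond_equal, opr1, opr2, result) with register operands:
// pass one emits
//   moveq result, opr1
// then opr1 is replaced by opr2 and acond by its negation, so pass two
// emits
//   movne result, opr2
// i.e. exactly one of the two conditional moves takes effect at run
// time, with no branch. When opr1 == opr2, a single unconditional (al)
// move is emitted and the loop exits after the first pass.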

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == nullptr, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(),  "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(),  "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
        case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const uint c = (uint)right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, c); break;
        case lir_logic_or:  __ orr_32(res, lreg, c); break;
        case lir_logic_xor: __ eor_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(dest->is_double_cpu(), "should be");
    Register res_lo = dest->as_register_lo();

    assert (dest->type() == T_LONG, "unexpected result type");
    assert (left->type() == T_LONG, "unexpected left type");
    assert (right->type() == T_LONG, "unexpected right type");

    const Register res_hi = dest->as_register_hi();
    const Register lreg_lo = left->as_register_lo();
    const Register lreg_hi = left->as_register_hi();

    if (right->is_register()) {
      const Register rreg_lo = right->as_register_lo();
      const Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
      }
      switch (code) {
        case lir_logic_and:
          __ andr(res_lo, lreg_lo, rreg_lo);
          __ andr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_or:
          __ orr(res_lo, lreg_lo, rreg_lo);
          __ orr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_xor:
          __ eor(res_lo, lreg_lo, rreg_lo);
          __ eor(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
      move_regs(res_lo, dest->as_register_lo());
    } else {
      assert(right->is_constant(), "must be");
      const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
      const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
      // Case for logic_or from do_ClassIDIntrinsic()
      if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
        switch (code) {
          case lir_logic_and:
            __ andr(res_lo, lreg_lo, c_lo);
            __ mov(res_hi, 0);
            break;
          case lir_logic_or:
            __ orr(res_lo, lreg_lo, c_lo);
            break;
          case lir_logic_xor:
            __ eor(res_lo, lreg_lo, c_lo);
            break;
          default:
            ShouldNotReachHere();
        }
      } else if (code == lir_logic_and &&
                 c_hi == -1 &&
                 (AsmOperand::is_rotated_imm(c_lo) ||
                  AsmOperand::is_rotated_imm(~c_lo))) {
        // Another case which handles logic_and from do_ClassIDIntrinsic()
        if (AsmOperand::is_rotated_imm(c_lo)) {
          __ andr(res_lo, lreg_lo, c_lo);
        } else {
          __ bic(res_lo, lreg_lo, ~c_lo);
        }
        if (res_hi != lreg_hi) {
          __ mov(res_hi, lreg_hi);
        }
      } else {
        BAILOUT("64 bit constant cannot be inlined");
      }
    }
  }
}
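
// Background on is_rotated_imm (informal): an ARM data-processing
// immediate is an 8-bit value rotated right by an even amount, so e.g.
// 0xFF000000 and 0x000003FC are encodable while 0x12345678 is not.
// That is why the 64-bit constant paths above only handle constants
// whose low word (or its complement, via bic) fits this scheme, and
// bail out otherwise.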
orr_32(res, lreg, rreg); break; 1679 case lir_logic_xor: __ eor_32(res, lreg, rreg); break; 1680 default: ShouldNotReachHere(); 1681 } 1682 } else { 1683 assert(right->is_constant(), "must be"); 1684 const uint c = (uint)right->as_constant_ptr()->as_jint(); 1685 if (!Assembler::is_arith_imm_in_range(c)) { 1686 BAILOUT("illegal arithmetic operand"); 1687 } 1688 switch (code) { 1689 case lir_logic_and: __ and_32(res, lreg, c); break; 1690 case lir_logic_or: __ orr_32(res, lreg, c); break; 1691 case lir_logic_xor: __ eor_32(res, lreg, c); break; 1692 default: ShouldNotReachHere(); 1693 } 1694 } 1695 } else { 1696 assert(dest->is_double_cpu(), "should be"); 1697 Register res_lo = dest->as_register_lo(); 1698 1699 assert (dest->type() == T_LONG, "unexpected result type"); 1700 assert (left->type() == T_LONG, "unexpected left type"); 1701 assert (right->type() == T_LONG, "unexpected right type"); 1702 1703 const Register res_hi = dest->as_register_hi(); 1704 const Register lreg_lo = left->as_register_lo(); 1705 const Register lreg_hi = left->as_register_hi(); 1706 1707 if (right->is_register()) { 1708 const Register rreg_lo = right->as_register_lo(); 1709 const Register rreg_hi = right->as_register_hi(); 1710 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1711 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input 1712 } 1713 switch (code) { 1714 case lir_logic_and: 1715 __ andr(res_lo, lreg_lo, rreg_lo); 1716 __ andr(res_hi, lreg_hi, rreg_hi); 1717 break; 1718 case lir_logic_or: 1719 __ orr(res_lo, lreg_lo, rreg_lo); 1720 __ orr(res_hi, lreg_hi, rreg_hi); 1721 break; 1722 case lir_logic_xor: 1723 __ eor(res_lo, lreg_lo, rreg_lo); 1724 __ eor(res_hi, lreg_hi, rreg_hi); 1725 break; 1726 default: 1727 ShouldNotReachHere(); 1728 } 1729 move_regs(res_lo, dest->as_register_lo()); 1730 } else { 1731 assert(right->is_constant(), "must be"); 1732 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong(); 1733 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32); 1734 // Case for logic_or from do_ClassIDIntrinsic() 1735 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) { 1736 switch (code) { 1737 case lir_logic_and: 1738 __ andr(res_lo, lreg_lo, c_lo); 1739 __ mov(res_hi, 0); 1740 break; 1741 case lir_logic_or: 1742 __ orr(res_lo, lreg_lo, c_lo); 1743 break; 1744 case lir_logic_xor: 1745 __ eor(res_lo, lreg_lo, c_lo); 1746 break; 1747 default: 1748 ShouldNotReachHere(); 1749 } 1750 } else if (code == lir_logic_and && 1751 c_hi == -1 && 1752 (AsmOperand::is_rotated_imm(c_lo) || 1753 AsmOperand::is_rotated_imm(~c_lo))) { 1754 // Another case which handles logic_and from do_ClassIDIntrinsic() 1755 if (AsmOperand::is_rotated_imm(c_lo)) { 1756 __ andr(res_lo, lreg_lo, c_lo); 1757 } else { 1758 __ bic(res_lo, lreg_lo, ~c_lo); 1759 } 1760 if (res_hi != lreg_hi) { 1761 __ mov(res_hi, lreg_hi); 1762 } 1763 } else { 1764 BAILOUT("64 bit constant cannot be inlined"); 1765 } 1766 } 1767 } 1768 } 1769 1770 1771 1772 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1773 if (opr1->is_single_cpu()) { 1774 if (opr2->is_constant()) { 1775 switch (opr2->as_constant_ptr()->type()) { 1776 case T_INT: { 1777 const jint c = opr2->as_constant_ptr()->as_jint(); 1778 if (Assembler::is_arith_imm_in_range(c)) { 1779 __ cmp_32(opr1->as_register(), c); 1780 } else if (Assembler::is_arith_imm_in_range(-c)) { 1781 __ cmn_32(opr1->as_register(), -c); 1782 } else { 1783 // This can happen when compiling lookupswitch 1784 __ mov_slow(Rtemp, c); 
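// Neither c nor -c fits in a rotated immediate, so compare against the
// constant materialized in Rtemp.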
1785           __ cmp_32(opr1->as_register(), Rtemp);
1786         }
1787         break;
1788       }
1789       case T_OBJECT:
1790         assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
1791         __ cmp(opr1->as_register(), 0);
1792         break;
1793       case T_METADATA:
1794         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
1795         assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
1796         __ cmp(opr1->as_register(), 0);
1797         break;
1798       default:
1799         ShouldNotReachHere();
1800     }
1801   } else if (opr2->is_single_cpu()) {
1802     if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1803       assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
1804       __ cmpoop(opr1->as_register(), opr2->as_register());
1805     } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
1806       assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
1807       __ cmp(opr1->as_register(), opr2->as_register());
1808     } else {
1809       assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
1810       __ cmp_32(opr1->as_register(), opr2->as_register());
1811     }
1812   } else {
1813     ShouldNotReachHere();
1814   }
1815 } else if (opr1->is_double_cpu()) {
1816   Register xlo = opr1->as_register_lo();
1817   Register xhi = opr1->as_register_hi();
1818   if (opr2->is_constant() && opr2->as_jlong() == 0) {
1819     assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
1820     __ orrs(Rtemp, xlo, xhi);
1821   } else if (opr2->is_register()) {
1822     Register ylo = opr2->as_register_lo();
1823     Register yhi = opr2->as_register_hi();
1824     if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1825       __ teq(xhi, yhi);
1826       __ teq(xlo, ylo, eq);
1827     } else {
1828       __ subs(Rtemp, xlo, ylo);
1829       __ sbcs(Rtemp, xhi, yhi);
1830     }
1831   } else {
1832     ShouldNotReachHere();
1833   }
1834 } else if (opr1->is_single_fpu()) {
1835   if (opr2->is_constant()) {
1836     assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
1837     __ cmp_zero_float(opr1->as_float_reg());
1838   } else {
1839     __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
1840   }
1841 } else if (opr1->is_double_fpu()) {
1842   if (opr2->is_constant()) {
1843     assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
1844     __ cmp_zero_double(opr1->as_double_reg());
1845   } else {
1846     __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
1847   }
1848 } else {
1849   ShouldNotReachHere();
1850 }
1851 }
1852
1853 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
1854   const Register res = dst->as_register();
1855   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1856     comp_op(lir_cond_unknown, left, right, op);
1857     __ fmstat();
1858     if (code == lir_ucmp_fd2i) { // unordered is less
1859       __ mvn(res, 0, lt);
1860       __ mov(res, 1, ge);
1861     } else { // unordered is greater
1862       __ mov(res, 1, cs);
1863       __ mvn(res, 0, cc);
1864     }
1865     __ mov(res, 0, eq);
1866
1867   } else {
1868     assert(code == lir_cmp_l2i, "must be");
1869
1870     Label done;
1871     const Register xlo = left->as_register_lo();
1872     const Register xhi = left->as_register_hi();
1873     const Register ylo = right->as_register_lo();
1874     const Register yhi = right->as_register_hi();
1875     __ cmp(xhi, yhi);
1876     __ mov(res, 1, gt);
1877     __ mvn(res, 0, lt);
1878     __ b(done, ne);
1879     __ subs(res, xlo, ylo);
1880     __ mov(res, 1, hi);
1881     __ mvn(res, 0, lo);
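    // Note: if the high words compared equal, the subs above leaves 0 in res
    // when the low words are equal too; the conditional mov/mvn then
    // overwrite it with +1/-1 for unsigned greater/less on the low words.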
1882     __ bind(done);
1883   }
1884 }
1885
1886
1887 void LIR_Assembler::align_call(LIR_Code code) {
1888   // Not needed
1889 }
1890
1891
1892 void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
1893   int ret_addr_offset = __ patchable_call(op->addr(), rtype);
1894   assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
1895   add_call_info_here(op->info());
1896 }
1897
1898
1899 void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
1900   bool near_range = __ cache_fully_reachable();
1901   address oop_address = pc();
1902
1903   bool use_movw = VM_Version::supports_movw();
1904
1905   // Ricklass may contain something that is not a metadata pointer, so
1906   // mov_metadata can't be used
1907   InlinedAddress value((address)Universe::non_oop_word());
1908   InlinedAddress addr(op->addr());
1909   if (use_movw) {
1910     __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
1911     __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
1912   } else {
1913     // No movw/movt: we must load a PC-relative value, but there is no
1914     // relocation, so there is no metadata table to load from.
1915     // Use a b instruction rather than a bl, inline the constant after the
1916     // branch, use a PC-relative ldr to load the constant, and arrange for
1917     // the call to return after the constant(s).
1918     __ ldr_literal(Ricklass, value);
1919   }
  // Roughly, the emitted sequence for the non-movw near-range case is:
  //   ldr  Ricklass, [PC, #off]   ; non_oop_word literal
  //   adr  LR, call_return
  //   b    <target>
  //   .word <non_oop_word>        ; bound literal
  // call_return:
1920   __ relocate(virtual_call_Relocation::spec(oop_address));
1921   if (near_range && use_movw) {
1922     __ bl(op->addr());
1923   } else {
1924     Label call_return;
1925     __ adr(LR, call_return);
1926     if (near_range) {
1927       __ b(op->addr());
1928     } else {
1929       __ indirect_jump(addr, Rtemp);
1930       __ bind_literal(addr);
1931     }
1932     if (!use_movw) {
1933       __ bind_literal(value);
1934     }
1935     __ bind(call_return);
1936   }
1937   add_call_info(code_offset(), op->info());
1938 }
1939
1940 void LIR_Assembler::emit_static_call_stub() {
1941   address call_pc = __ pc();
1942   address stub = __ start_a_stub(call_stub_size());
1943   if (stub == nullptr) {
1944     BAILOUT("static call stub overflow");
1945   }
1946
1947   DEBUG_ONLY(int offset = code_offset();)
1948
1949   InlinedMetadata metadata_literal(nullptr);
1950   __ relocate(static_stub_Relocation::spec(call_pc));
1951   // NativeMovConstReg::next_instruction_address() must step over the whole
1952   // following ldr_literal, even if it is not a single instruction.
1953   // (See CompiledStaticCall::set_to_interpreted())
1954 #ifdef ASSERT
1955   address ldr_site = __ pc();
1956 #endif
1957   __ ldr_literal(Rmethod, metadata_literal);
1958   assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
1959   bool near_range = __ cache_fully_reachable();
1960   InlinedAddress dest((address)-1);
1961   if (near_range) {
1962     address branch_site = __ pc();
1963     __ b(branch_site); // b to self maps to special NativeJump -1 destination
1964   } else {
1965     __ indirect_jump(dest, Rtemp);
1966   }
1967   __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
1968   if (!near_range) {
1969     __ bind_literal(dest); // special NativeJump -1 destination
1970   }
1971
1972   assert(code_offset() - offset <= call_stub_size(), "overflow");
1973   __ end_a_stub();
1974 }
1975
1976 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1977   assert(exceptionOop->as_register() == Rexception_obj, "must match");
1978   assert(exceptionPC->as_register() == Rexception_pc, "must match");
1979   info->add_register_oop(exceptionOop);
1980
1981   Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
1982 Runtime1::handle_exception_id : 1983 Runtime1::handle_exception_nofpu_id; 1984 Label return_address; 1985 __ adr(Rexception_pc, return_address); 1986 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); 1987 __ bind(return_address); 1988 add_call_info_here(info); // for exception handler 1989 } 1990 1991 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1992 assert(exceptionOop->as_register() == Rexception_obj, "must match"); 1993 __ b(_unwind_handler_entry); 1994 } 1995 1996 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 1997 AsmShift shift = lsl; 1998 switch (code) { 1999 case lir_shl: shift = lsl; break; 2000 case lir_shr: shift = asr; break; 2001 case lir_ushr: shift = lsr; break; 2002 default: ShouldNotReachHere(); 2003 } 2004 2005 if (dest->is_single_cpu()) { 2006 __ andr(Rtemp, count->as_register(), 31); 2007 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp)); 2008 } else if (dest->is_double_cpu()) { 2009 Register dest_lo = dest->as_register_lo(); 2010 Register dest_hi = dest->as_register_hi(); 2011 Register src_lo = left->as_register_lo(); 2012 Register src_hi = left->as_register_hi(); 2013 Register Rcount = count->as_register(); 2014 // Resolve possible register conflicts 2015 if (shift == lsl && dest_hi == src_lo) { 2016 dest_hi = Rtemp; 2017 } else if (shift != lsl && dest_lo == src_hi) { 2018 dest_lo = Rtemp; 2019 } else if (dest_lo == src_lo && dest_hi == src_hi) { 2020 dest_lo = Rtemp; 2021 } else if (dest_lo == Rcount || dest_hi == Rcount) { 2022 Rcount = Rtemp; 2023 } 2024 __ andr(Rcount, count->as_register(), 63); 2025 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount); 2026 move_regs(dest_lo, dest->as_register_lo()); 2027 move_regs(dest_hi, dest->as_register_hi()); 2028 } else { 2029 ShouldNotReachHere(); 2030 } 2031 } 2032 2033 2034 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2035 AsmShift shift = lsl; 2036 switch (code) { 2037 case lir_shl: shift = lsl; break; 2038 case lir_shr: shift = asr; break; 2039 case lir_ushr: shift = lsr; break; 2040 default: ShouldNotReachHere(); 2041 } 2042 2043 if (dest->is_single_cpu()) { 2044 count &= 31; 2045 if (count != 0) { 2046 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count)); 2047 } else { 2048 move_regs(left->as_register(), dest->as_register()); 2049 } 2050 } else if (dest->is_double_cpu()) { 2051 count &= 63; 2052 if (count != 0) { 2053 Register dest_lo = dest->as_register_lo(); 2054 Register dest_hi = dest->as_register_hi(); 2055 Register src_lo = left->as_register_lo(); 2056 Register src_hi = left->as_register_hi(); 2057 // Resolve possible register conflicts 2058 if (shift == lsl && dest_hi == src_lo) { 2059 dest_hi = Rtemp; 2060 } else if (shift != lsl && dest_lo == src_hi) { 2061 dest_lo = Rtemp; 2062 } 2063 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count); 2064 move_regs(dest_lo, dest->as_register_lo()); 2065 move_regs(dest_hi, dest->as_register_hi()); 2066 } else { 2067 __ long_move(dest->as_register_lo(), dest->as_register_hi(), 2068 left->as_register_lo(), left->as_register_hi()); 2069 } 2070 } else { 2071 ShouldNotReachHere(); 2072 } 2073 } 2074 2075 2076 // Saves 4 given registers in reserved argument area. 
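// The registers are stored at SP without writeback, so SP is unchanged and
// restore_from_reserved_area() below can reload them from the same place;
// this is used to preserve R0..R3 across runtime calls in emit_arraycopy().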
2077 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
2078   verify_reserved_argument_area_size(4);
2079   __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
2080 }
2081
2082 // Restores 4 given registers from reserved argument area.
2083 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
2084   __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
2085 }
2086
2087
2088 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2089   ciArrayKlass* default_type = op->expected_type();
2090   Register src = op->src()->as_register();
2091   Register src_pos = op->src_pos()->as_register();
2092   Register dst = op->dst()->as_register();
2093   Register dst_pos = op->dst_pos()->as_register();
2094   Register length = op->length()->as_register();
2095   Register tmp = op->tmp()->as_register();
2096   Register tmp2 = Rtemp;
2097
2098   assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");
2099
2100   CodeStub* stub = op->stub();
2101
2102   int flags = op->flags();
2103   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2104   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2105
2106   // If we don't know anything about the types, just go through the generic arraycopy
2107   if (default_type == nullptr) {
2108
2109     // save arguments, because they will be killed by a runtime call
2110     save_in_reserved_area(R0, R1, R2, R3);
2111
2112     // pass length argument on SP[0]
2113     __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment
2114
2115     address copyfunc_addr = StubRoutines::generic_arraycopy();
2116     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2117 #ifndef PRODUCT
2118     if (PrintC1Statistics) {
2119       __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
2120     }
2121 #endif // !PRODUCT
2122     // the stub is in the code cache, so it is close enough for a direct call
2123     __ call(copyfunc_addr, relocInfo::runtime_call_type);
2124
2125     __ add(SP, SP, 2*wordSize);
2126
2127     __ cbz_32(R0, *stub->continuation());
2128
2129     __ mvn_32(tmp, R0);
2130     restore_from_reserved_area(R0, R1, R2, R3);  // load saved arguments in slow case only
2131     __ sub_32(length, length, tmp);
2132     __ add_32(src_pos, src_pos, tmp);
2133     __ add_32(dst_pos, dst_pos, tmp);
2134
2135     __ b(*stub->entry());
2136
2137     __ bind(*stub->continuation());
2138     return;
2139   }
2140
2141   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(),
2142          "must be true at this point");
2143   int elem_size = type2aelembytes(basic_type);
2144   int shift = exact_log2(elem_size);
2145
2146   // Check for null
2147   if (flags & LIR_OpArrayCopy::src_null_check) {
2148     if (flags & LIR_OpArrayCopy::dst_null_check) {
2149       __ cmp(src, 0);
2150       __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed
2151       __ b(*stub->entry(), eq);
2152     } else {
2153       __ cbz(src, *stub->entry());
2154     }
2155   } else if (flags & LIR_OpArrayCopy::dst_null_check) {
2156     __ cbz(dst, *stub->entry());
2157   }
2158
2159   // If the compiler was not able to prove that the exact type of the source or the destination
2160   // of the arraycopy is an array type, check at runtime if the source or the destination is
2161   // an instance type.
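  // An array klass has a negative layout helper, while instance klasses have
  // a layout helper >= Klass::_lh_neutral_value (see the layout helper
  // encoding in klass.hpp), so the 'ge' branches below send anything that is
  // not known to be an array to the slow path.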
2162   if (flags & LIR_OpArrayCopy::type_check) {
2163     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2164       __ load_klass(tmp, dst);
2165       __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2166       __ mov_slow(tmp, Klass::_lh_neutral_value);
2167       __ cmp_32(tmp2, tmp);
2168       __ b(*stub->entry(), ge);
2169     }
2170
2171     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2172       __ load_klass(tmp, src);
2173       __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2174       __ mov_slow(tmp, Klass::_lh_neutral_value);
2175       __ cmp_32(tmp2, tmp);
2176       __ b(*stub->entry(), ge);
2177     }
2178   }
2179
2180   // Check if src_pos, dst_pos or length is negative
2181   const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
2182                                   LIR_OpArrayCopy::dst_pos_positive_check |
2183                                   LIR_OpArrayCopy::length_positive_check;
2184   switch (flags & all_positive_checks) {
2185     case LIR_OpArrayCopy::src_pos_positive_check:
2186       __ branch_if_negative_32(src_pos, *stub->entry());
2187       break;
2188     case LIR_OpArrayCopy::dst_pos_positive_check:
2189       __ branch_if_negative_32(dst_pos, *stub->entry());
2190       break;
2191     case LIR_OpArrayCopy::length_positive_check:
2192       __ branch_if_negative_32(length, *stub->entry());
2193       break;
2194     case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check:
2195       __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry());
2196       break;
2197     case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2198       __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry());
2199       break;
2200     case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2201       __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry());
2202       break;
2203     case all_positive_checks:
2204       __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry());
2205       break;
2206     default:
2207       assert((flags & all_positive_checks) == 0, "the last option");
2208   }
2209
2210   // Range checks
2211   if (flags & LIR_OpArrayCopy::src_range_check) {
2212     __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes()));
2213     __ add_32(tmp, src_pos, length);
2214     __ cmp_32(tmp, tmp2);
2215     __ b(*stub->entry(), hi);
2216   }
2217   if (flags & LIR_OpArrayCopy::dst_range_check) {
2218     __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2219     __ add_32(tmp, dst_pos, length);
2220     __ cmp_32(tmp, tmp2);
2221     __ b(*stub->entry(), hi);
2222   }
2223
2224   // Check if src and dst are of the same type
2225   if (flags & LIR_OpArrayCopy::type_check) {
2226     // We don't know whether the array types are compatible
2227     if (basic_type != T_OBJECT) {
2228       // Simple test for basic type arrays
2229       if (UseCompressedClassPointers) {
2230         // No need to decode: comparing the compressed klass pointers is enough
2231         __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
2232         __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
2233         __ cmp_32(tmp, tmp2);
2234       } else {
2235         __ load_klass(tmp, src);
2236         __ load_klass(tmp2, dst);
2237         __ cmp(tmp, tmp2);
2238       }
2239       __ b(*stub->entry(), ne);
2240     } else {
2241       // For object arrays, if src is a subclass of dst then we can
2242       // safely do the copy.
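      // check_klass_subtype_fast_path() branches to 'cont' when src is a
      // known subtype of dst, to 'slow' (or straight to the stub entry when
      // no checkcast stub exists) on definite failure, and falls through to
      // the slow_subtype_check_id runtime call otherwise; a non-zero result
      // from that call also means src is a subtype, everything else ends up
      // at 'slow', where the per-element checkcast copy is attempted.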
      Label cont, slow;
2244
2245       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2246
2247       __ load_klass(tmp, src);
2248       __ load_klass(tmp2, dst);
2249
2250       // We are at a call, so all live registers are saved before we
2251       // get here
2252       assert_different_registers(tmp, tmp2, R6, altFP_7_11);
2253
2254       __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);
2255
2256       __ mov(R6, R0);
2257       __ mov(altFP_7_11, R1);
2258       __ mov(R0, tmp);
2259       __ mov(R1, tmp2);
2260       __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
2261       __ cmp_32(R0, 0);
2262       __ mov(R0, R6);
2263       __ mov(R1, altFP_7_11);
2264
2265       if (copyfunc_addr != nullptr) { // use stub if available
2266         // src is not a subclass of dst, so we have to do a
2267         // per-element check.
2268
2269         __ b(cont, ne);
2270
2271         __ bind(slow);
2272
2273         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2274         if ((flags & mask) != mask) {
2275           // One of the two is known to be an object array; check that the other one is too.
2276           assert(flags & mask, "one of the two should be known to be an object array");
2277
2278           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2279             __ load_klass(tmp, src);
2280           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2281             __ load_klass(tmp, dst);
2282           }
2283           int lh_offset = in_bytes(Klass::layout_helper_offset());
2284
2285           __ ldr_u32(tmp2, Address(tmp, lh_offset));
2286
2287           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2288           __ mov_slow(tmp, objArray_lh);
2289           __ cmp_32(tmp, tmp2);
2290           __ b(*stub->entry(), ne);
2291         }
2292
2293         save_in_reserved_area(R0, R1, R2, R3);
2294
2295         Register src_ptr = R0;
2296         Register dst_ptr = R1;
2297         Register len = R2;
2298         Register chk_off = R3;
2299         Register super_k = tmp;
2300
2301         __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2302         __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
2303
2304         __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2305         __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
2306         __ load_klass(tmp, dst);
2307
2308         int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2309         int sco_offset = in_bytes(Klass::super_check_offset_offset());
2310
2311         __ ldr(super_k, Address(tmp, ek_offset));
2312
2313         __ mov(len, length);
2314         __ ldr_u32(chk_off, Address(super_k, sco_offset));
2315         __ push(super_k);
2316
2317         __ call(copyfunc_addr, relocInfo::runtime_call_type);
2318
2319 #ifndef PRODUCT
2320         if (PrintC1Statistics) {
2321           Label failed;
2322           __ cbnz_32(R0, failed);
2323           __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
2324           __ bind(failed);
2325         }
2326 #endif // PRODUCT
2327
2328         __ add(SP, SP, wordSize);  // Drop super_k argument
2329
2330         __ cbz_32(R0, *stub->continuation());
2331         __ mvn_32(tmp, R0);
2332
2333         // load saved arguments in slow case only
2334         restore_from_reserved_area(R0, R1, R2, R3);
2335
2336         __ sub_32(length, length, tmp);
2337         __ add_32(src_pos, src_pos, tmp);
2338         __ add_32(dst_pos, dst_pos, tmp);
2339
2340 #ifndef PRODUCT
2341         if (PrintC1Statistics) {
2342           __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
2343         }
2344 #endif
2345
2346         __ b(*stub->entry());
2347
2348         __ bind(cont);
2349       } else {
2350         __ b(*stub->entry(), eq);
2351         __ bind(cont);
2352       }
2353     }
2354   }
2355
2356 #ifndef PRODUCT
2357   if (PrintC1Statistics) {
2358     address counter =
Runtime1::arraycopy_count_address(basic_type); 2359 __ inc_counter(counter, tmp, tmp2); 2360 } 2361 #endif // !PRODUCT 2362 2363 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2364 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2365 const char *name; 2366 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2367 2368 Register src_ptr = R0; 2369 Register dst_ptr = R1; 2370 Register len = R2; 2371 2372 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2373 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift); 2374 2375 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2376 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift); 2377 2378 __ mov(len, length); 2379 2380 __ call(entry, relocInfo::runtime_call_type); 2381 2382 __ bind(*stub->continuation()); 2383 } 2384 2385 #ifdef ASSERT 2386 // emit run-time assertion 2387 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2388 assert(op->code() == lir_assert, "must be"); 2389 2390 if (op->in_opr1()->is_valid()) { 2391 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2392 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2393 } else { 2394 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2395 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2396 } 2397 2398 Label ok; 2399 if (op->condition() != lir_cond_always) { 2400 AsmCondition acond = al; 2401 switch (op->condition()) { 2402 case lir_cond_equal: acond = eq; break; 2403 case lir_cond_notEqual: acond = ne; break; 2404 case lir_cond_less: acond = lt; break; 2405 case lir_cond_lessEqual: acond = le; break; 2406 case lir_cond_greaterEqual: acond = ge; break; 2407 case lir_cond_greater: acond = gt; break; 2408 case lir_cond_aboveEqual: acond = hs; break; 2409 case lir_cond_belowEqual: acond = ls; break; 2410 default: ShouldNotReachHere(); 2411 } 2412 __ b(ok, acond); 2413 } 2414 if (op->halt()) { 2415 const char* str = __ code_string(op->msg()); 2416 __ stop(str); 2417 } else { 2418 breakpoint(); 2419 } 2420 __ bind(ok); 2421 } 2422 #endif // ASSERT 2423 2424 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2425 fatal("CRC32 intrinsic is not implemented on this platform"); 2426 } 2427 2428 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2429 Register obj = op->obj_opr()->as_pointer_register(); 2430 Register hdr = op->hdr_opr()->as_pointer_register(); 2431 Register lock = op->lock_opr()->as_pointer_register(); 2432 2433 if (LockingMode == LM_MONITOR) { 2434 if (op->info() != nullptr) { 2435 add_debug_info_for_null_check_here(op->info()); 2436 __ null_check(obj); 2437 } 2438 __ b(*op->stub()->entry()); 2439 } else if (op->code() == lir_lock) { 2440 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2441 int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2442 if (op->info() != nullptr) { 2443 add_debug_info_for_null_check(null_check_offset, op->info()); 2444 } 2445 } else if (op->code() == lir_unlock) { 2446 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2447 } else { 2448 ShouldNotReachHere(); 2449 } 2450 __ bind(*op->stub()->continuation()); 2451 } 2452 2453 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2454 Register obj = op->obj()->as_pointer_register(); 2455 Register result = op->result_opr()->as_pointer_register(); 2456 2457 CodeEmitInfo* info = op->info(); 2458 if (info != nullptr) { 
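    // The klass load below doubles as the implicit null check for obj.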
2459     add_debug_info_for_null_check_here(info);
2460   }
2461
2462   if (UseCompressedClassPointers) { // not expected to be true on 32-bit ARM
2463     __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2464   } else {
2465     __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2466   }
2467 }
2468
2469 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2470   ciMethod* method = op->profiled_method();
2471   int bci = op->profiled_bci();
2472   ciMethod* callee = op->profiled_callee();
2473
2474   // Update counter for all call types
2475   ciMethodData* md = method->method_data_or_null();
2476   assert(md != nullptr, "Sanity");
2477   ciProfileData* data = md->bci_to_data(bci);
2478   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2479   assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2480   Register mdo = op->mdo()->as_register();
2481   assert(op->tmp1()->is_register(), "tmp1 must be allocated");
2482   Register tmp1 = op->tmp1()->as_pointer_register();
2483   assert_different_registers(mdo, tmp1);
2484   __ mov_metadata(mdo, md->constant_encoding());
2485   int mdo_offset_bias = 0;
2486   int max_offset = 4096;
2487   if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
2488     // The offset is large, so bias the mdo by the base of the slot so
2489     // that the ldr can use an immediate offset to reference the slots of the data
2490     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2491     __ mov_slow(tmp1, mdo_offset_bias);
2492     __ add(mdo, mdo, tmp1);
2493   }
2494
2495   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2496   // Perform additional virtual call profiling for invokevirtual and
2497   // invokeinterface bytecodes
2498   if (op->should_profile_receiver_type()) {
2499     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2500     Register recv = op->recv()->as_register();
2501     assert_different_registers(mdo, tmp1, recv);
2502     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2503     ciKlass* known_klass = op->known_holder();
2504     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2505       // We know the type that will be seen at this call site; we can
2506       // statically update the MethodData* rather than needing to do
2507       // dynamic tests on the receiver type
2508
2509       // NOTE: we should probably put a lock around this search to
2510       // avoid collisions by concurrent compilations
2511       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2512       uint i;
2513       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2514         ciKlass* receiver = vc_data->receiver(i);
2515         if (known_klass->equals(receiver)) {
2516           Address data_addr(mdo, md->byte_offset_of_slot(data,
2517                                                          VirtualCallData::receiver_count_offset(i)) -
2518                             mdo_offset_bias);
2519           __ ldr(tmp1, data_addr);
2520           __ add(tmp1, tmp1, DataLayout::counter_increment);
2521           __ str(tmp1, data_addr);
2522           return;
2523         }
2524       }
2525
2526       // Receiver type not found in profile data; select an empty slot
2527
2528       // Note that this is less efficient than it should be because it
2529       // always does a write to the receiver part of the
2530       // VirtualCallData rather than just the first time
2531       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2532         ciKlass* receiver = vc_data->receiver(i);
2533         if (receiver == nullptr) {
2534           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2535                             mdo_offset_bias);
2536           __ mov_metadata(tmp1,
known_klass->constant_encoding()); 2537 __ str(tmp1, recv_addr); 2538 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 2539 mdo_offset_bias); 2540 __ ldr(tmp1, data_addr); 2541 __ add(tmp1, tmp1, DataLayout::counter_increment); 2542 __ str(tmp1, data_addr); 2543 return; 2544 } 2545 } 2546 } else { 2547 __ load_klass(recv, recv); 2548 Label update_done; 2549 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2550 // Receiver did not match any saved receiver and there is no empty row for it. 2551 // Increment total counter to indicate polymorphic case. 2552 __ ldr(tmp1, counter_addr); 2553 __ add(tmp1, tmp1, DataLayout::counter_increment); 2554 __ str(tmp1, counter_addr); 2555 2556 __ bind(update_done); 2557 } 2558 } else { 2559 // Static call 2560 __ ldr(tmp1, counter_addr); 2561 __ add(tmp1, tmp1, DataLayout::counter_increment); 2562 __ str(tmp1, counter_addr); 2563 } 2564 } 2565 2566 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2567 fatal("Type profiling not implemented on this platform"); 2568 } 2569 2570 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 2571 Unimplemented(); 2572 } 2573 2574 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2575 Unimplemented(); 2576 } 2577 2578 2579 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2580 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2581 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp()); 2582 } 2583 2584 2585 void LIR_Assembler::align_backward_branch_target() { 2586 // Some ARM processors do better with 8-byte branch target alignment 2587 __ align(8); 2588 } 2589 2590 2591 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2592 // tmp must be unused 2593 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2594 2595 if (left->is_single_cpu()) { 2596 assert (dest->type() == T_INT, "unexpected result type"); 2597 assert (left->type() == T_INT, "unexpected left type"); 2598 __ neg_32(dest->as_register(), left->as_register()); 2599 } else if (left->is_double_cpu()) { 2600 Register dest_lo = dest->as_register_lo(); 2601 Register dest_hi = dest->as_register_hi(); 2602 Register src_lo = left->as_register_lo(); 2603 Register src_hi = left->as_register_hi(); 2604 if (dest_lo == src_hi) { 2605 dest_lo = Rtemp; 2606 } 2607 __ rsbs(dest_lo, src_lo, 0); 2608 __ rsc(dest_hi, src_hi, 0); 2609 move_regs(dest_lo, dest->as_register_lo()); 2610 } else if (left->is_single_fpu()) { 2611 __ neg_float(dest->as_float_reg(), left->as_float_reg()); 2612 } else if (left->is_double_fpu()) { 2613 __ neg_double(dest->as_double_reg(), left->as_double_reg()); 2614 } else { 2615 ShouldNotReachHere(); 2616 } 2617 } 2618 2619 2620 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2621 assert(patch_code == lir_patch_none, "Patch code not supported"); 2622 LIR_Address* addr = addr_opr->as_address_ptr(); 2623 if (addr->index()->is_illegal()) { 2624 jint c = addr->disp(); 2625 if (!Assembler::is_arith_imm_in_range(c)) { 2626 BAILOUT("illegal arithmetic operand"); 2627 } 2628 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c); 2629 } else { 2630 assert(addr->disp() == 0, "cannot handle otherwise"); 2631 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), 2632 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale())); 2633 } 2634 } 2635 2636 2637 void 
LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2638 assert(!tmp->is_valid(), "don't need temporary"); 2639 __ call(dest); 2640 if (info != nullptr) { 2641 add_call_info_here(info); 2642 } 2643 } 2644 2645 2646 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2647 assert(src->is_double_cpu() && dest->is_address() || 2648 src->is_address() && dest->is_double_cpu(), 2649 "Simple move_op is called for all other cases"); 2650 2651 int null_check_offset; 2652 if (dest->is_address()) { 2653 // Store 2654 const LIR_Address* addr = dest->as_address_ptr(); 2655 const Register src_lo = src->as_register_lo(); 2656 const Register src_hi = src->as_register_hi(); 2657 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2658 2659 if (src_lo < src_hi) { 2660 null_check_offset = __ offset(); 2661 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi)); 2662 } else { 2663 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2664 __ mov(Rtemp, src_hi); 2665 null_check_offset = __ offset(); 2666 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp)); 2667 } 2668 } else { 2669 // Load 2670 const LIR_Address* addr = src->as_address_ptr(); 2671 const Register dest_lo = dest->as_register_lo(); 2672 const Register dest_hi = dest->as_register_hi(); 2673 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2674 2675 null_check_offset = __ offset(); 2676 if (dest_lo < dest_hi) { 2677 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi)); 2678 } else { 2679 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2680 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp)); 2681 __ mov(dest_hi, Rtemp); 2682 } 2683 } 2684 2685 if (info != nullptr) { 2686 add_debug_info_for_null_check(null_check_offset, info); 2687 } 2688 } 2689 2690 2691 void LIR_Assembler::membar() { 2692 __ membar(MacroAssembler::StoreLoad, Rtemp); 2693 } 2694 2695 void LIR_Assembler::membar_acquire() { 2696 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 2697 } 2698 2699 void LIR_Assembler::membar_release() { 2700 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 2701 } 2702 2703 void LIR_Assembler::membar_loadload() { 2704 __ membar(MacroAssembler::LoadLoad, Rtemp); 2705 } 2706 2707 void LIR_Assembler::membar_storestore() { 2708 __ membar(MacroAssembler::StoreStore, Rtemp); 2709 } 2710 2711 void LIR_Assembler::membar_loadstore() { 2712 __ membar(MacroAssembler::LoadStore, Rtemp); 2713 } 2714 2715 void LIR_Assembler::membar_storeload() { 2716 __ membar(MacroAssembler::StoreLoad, Rtemp); 2717 } 2718 2719 void LIR_Assembler::on_spin_wait() { 2720 Unimplemented(); 2721 } 2722 2723 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2724 // Not used on ARM 2725 Unimplemented(); 2726 } 2727 2728 void LIR_Assembler::peephole(LIR_List* lir) { 2729 LIR_OpList* inst = lir->instructions_list(); 2730 const int inst_length = inst->length(); 2731 for (int i = 0; i < inst_length; i++) { 2732 LIR_Op* op = inst->at(i); 2733 switch (op->code()) { 2734 case lir_cmp: { 2735 // Replace: 2736 // cmp rX, y 2737 // cmove [EQ] y, z, rX 2738 // with 2739 // cmp rX, y 2740 // cmove [EQ] illegalOpr, z, rX 2741 // 2742 // or 2743 // 
cmp rX, y
2744     //   cmove [NE] z, y, rX
2745     // with
2746     //   cmp rX, y
2747     //   cmove [NE] z, illegalOpr, rX
2748     //
2749     // moves from illegalOpr should be removed when converting LIR to native assembly
2750
2751       LIR_Op2* cmp = op->as_Op2();
2752       assert(cmp != nullptr, "cmp LIR instruction is not an op2");
2753
2754       if (i + 1 < inst_length) {
2755         LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
2756         if (cmove != nullptr && cmove->code() == lir_cmove) {
2757           LIR_Opr cmove_res = cmove->result_opr();
2758           bool res_is_op1 = cmove_res == cmp->in_opr1();
2759           bool res_is_op2 = cmove_res == cmp->in_opr2();
2760           LIR_Opr cmp_res, cmp_arg;
2761           if (res_is_op1) {
2762             cmp_res = cmp->in_opr1();
2763             cmp_arg = cmp->in_opr2();
2764           } else if (res_is_op2) {
2765             cmp_res = cmp->in_opr2();
2766             cmp_arg = cmp->in_opr1();
2767           } else {
2768             cmp_res = LIR_OprFact::illegalOpr;
2769             cmp_arg = LIR_OprFact::illegalOpr;
2770           }
2771
2772           if (cmp_res != LIR_OprFact::illegalOpr) {
2773             LIR_Condition cond = cmove->condition();
2774             if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
2775               cmove->set_in_opr1(LIR_OprFact::illegalOpr);
2776             } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
2777               cmove->set_in_opr2(LIR_OprFact::illegalOpr);
2778             }
2779           }
2780         }
2781       }
2782       break;
2783     }
2784
2785     default:
2786       break;
2787   }
2788 }
2789 }
2790
2791 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2792   assert(src->is_address(), "sanity");
2793   Address addr = as_Address(src->as_address_ptr());
2794
2795   if (code != lir_xchg) {
2796     assert(!data->is_oop(), "xadd for oops");
2797   }
2798
2799
2800   __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2801
  // ldrex/strex retry loop: reload the old value, compute the new one and
  // retry until the strex succeeds (strex writes 0 into Rtemp on success).
2802   Label retry;
2803   __ bind(retry);
2804
2805   if (data->type() == T_INT || data->is_oop()) {
2806     Register dst = dest->as_register();
2807     Register new_val = noreg;
2808     __ ldrex(dst, addr);
2809     if (code == lir_xadd) {
2810       Register tmp_reg = tmp->as_register();
2811       if (data->is_constant()) {
2812         assert_different_registers(dst, tmp_reg);
2813         __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
2814       } else {
2815         assert_different_registers(dst, tmp_reg, data->as_register());
2816         __ add_32(tmp_reg, dst, data->as_register());
2817       }
2818       new_val = tmp_reg;
2819     } else {
2820       if (UseCompressedOops && data->is_oop()) {
2821         new_val = tmp->as_pointer_register();
2822       } else {
2823         new_val = data->as_register();
2824       }
2825       assert_different_registers(dst, new_val);
2826     }
2827     __ strex(Rtemp, new_val, addr);
2828
2829   } else if (data->type() == T_LONG) {
2830     Register dst_lo = dest->as_register_lo();
2831     Register new_val_lo = noreg;
2832     Register dst_hi = dest->as_register_hi();
2833
2834     assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
2835     assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
2836
2838     __ ldrexd(dst_lo, addr);
2839     if (code == lir_xadd) {
2840       Register tmp_lo = tmp->as_register_lo();
2841       Register tmp_hi = tmp->as_register_hi();
2842
2843       assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
2844       assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
2845
2846       if (data->is_constant()) {
2847         jlong c = data->as_constant_ptr()->as_jlong();
2848         assert((jlong)((jint)c) == c, "overflow");
2849         assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
2850         __ adds(tmp_lo, dst_lo, (jint)c);
2851         __ adc(tmp_hi, dst_hi, 0);
2852       } else {
2853         Register data_lo = data->as_register_lo();
2854         Register data_hi = data->as_register_hi();
2855         assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, data_lo, data_hi);
2856         __ adds(tmp_lo, dst_lo, data_lo);
2857         __ adc(tmp_hi, dst_hi, data_hi);
2858       }
2859       new_val_lo = tmp_lo;
2860     } else {
2861       new_val_lo = data->as_register_lo();
2862       Register new_val_hi = data->as_register_hi();
2863
2864       assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
2865       assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
2866       assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
2867     }
2868     __ strexd(Rtemp, new_val_lo, addr);
2869   } else {
2870     ShouldNotReachHere();
2871   }
2872
2873   __ cbnz_32(Rtemp, retry);
2874   __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
2875
2876 }
2877
2878 #undef __