/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
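
// Note: the BAILOUT/BAILOUT_ macros used below abandon the current C1
// compilation attempt (e.g. when a displacement does not fit the ldr/str
// immediate range) rather than generating incorrect code.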

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();


  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ?
           Address(base, addr->index()->as_register(), lsl, scale) :
           Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}


int LIR_Assembler::check_icache() {
  Register receiver = LIR_Assembler::receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Ricklass);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  // TODO: ARM
  __ nop(); // See comments in other ports

  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, Rtemp, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
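        // Constant object is not yet resolvable here: emit a patchable mov
        // together with a PatchingStub (see jobject2reg_with_patching above).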
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL),"cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != NULL) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
                 frame_map()->address_for_slot(dest->single_stack_ix()) :
                 frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide,
                            bool unaligned) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != NULL) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
                 frame_map()->address_for_slot(src->single_stack_ix()) :
                 frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide, bool unaligned) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldr_u32(dest->as_pointer_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
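    // (The nop below reserves that slot; the patching code may use it, e.g.
    //  to add a high part of the offset into a scratch register first.)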
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power);                            // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert (op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
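    // (Each profile row holds a receiver-klass slot and a matching count slot;
    //  rows are probed linearly and the matching row's counter is incremented.)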
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL,       "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
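// If obj is null, the helper below records the null_seen flag in the MDO and
// branches to obj_is_null; otherwise it falls through with md, data and
// mdo_offset_bias filled in.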
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != NULL, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true, if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ?
                              &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ?
                              &profile_cast_success : &done;


      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ?
                              &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  //   if (*addr == cmpval) {
  //     *addr = newval;
  //     dest = 1;
  //   } else {
  //     dest = 0;
  //   }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
      op->addr()->as_pointer_register() :
      op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
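  // (A successful CAS is expected to have both acquire and release semantics,
  //  which is why barriers are emitted on both sides of the atomic operation.)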
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) {                         // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(),  "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(),  "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
orr_32(res, lreg, rreg); break; 1685 case lir_logic_xor: __ eor_32(res, lreg, rreg); break; 1686 default: ShouldNotReachHere(); 1687 } 1688 } else { 1689 assert(right->is_constant(), "must be"); 1690 const uint c = (uint)right->as_constant_ptr()->as_jint(); 1691 if (!Assembler::is_arith_imm_in_range(c)) { 1692 BAILOUT("illegal arithmetic operand"); 1693 } 1694 switch (code) { 1695 case lir_logic_and: __ and_32(res, lreg, c); break; 1696 case lir_logic_or: __ orr_32(res, lreg, c); break; 1697 case lir_logic_xor: __ eor_32(res, lreg, c); break; 1698 default: ShouldNotReachHere(); 1699 } 1700 } 1701 } else { 1702 assert(dest->is_double_cpu(), "should be"); 1703 Register res_lo = dest->as_register_lo(); 1704 1705 assert (dest->type() == T_LONG, "unexpected result type"); 1706 assert (left->type() == T_LONG, "unexpected left type"); 1707 assert (right->type() == T_LONG, "unexpected right type"); 1708 1709 const Register res_hi = dest->as_register_hi(); 1710 const Register lreg_lo = left->as_register_lo(); 1711 const Register lreg_hi = left->as_register_hi(); 1712 1713 if (right->is_register()) { 1714 const Register rreg_lo = right->as_register_lo(); 1715 const Register rreg_hi = right->as_register_hi(); 1716 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1717 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input 1718 } 1719 switch (code) { 1720 case lir_logic_and: 1721 __ andr(res_lo, lreg_lo, rreg_lo); 1722 __ andr(res_hi, lreg_hi, rreg_hi); 1723 break; 1724 case lir_logic_or: 1725 __ orr(res_lo, lreg_lo, rreg_lo); 1726 __ orr(res_hi, lreg_hi, rreg_hi); 1727 break; 1728 case lir_logic_xor: 1729 __ eor(res_lo, lreg_lo, rreg_lo); 1730 __ eor(res_hi, lreg_hi, rreg_hi); 1731 break; 1732 default: 1733 ShouldNotReachHere(); 1734 } 1735 move_regs(res_lo, dest->as_register_lo()); 1736 } else { 1737 assert(right->is_constant(), "must be"); 1738 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong(); 1739 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32); 1740 // Case for logic_or from do_ClassIDIntrinsic() 1741 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) { 1742 switch (code) { 1743 case lir_logic_and: 1744 __ andr(res_lo, lreg_lo, c_lo); 1745 __ mov(res_hi, 0); 1746 break; 1747 case lir_logic_or: 1748 __ orr(res_lo, lreg_lo, c_lo); 1749 break; 1750 case lir_logic_xor: 1751 __ eor(res_lo, lreg_lo, c_lo); 1752 break; 1753 default: 1754 ShouldNotReachHere(); 1755 } 1756 } else if (code == lir_logic_and && 1757 c_hi == -1 && 1758 (AsmOperand::is_rotated_imm(c_lo) || 1759 AsmOperand::is_rotated_imm(~c_lo))) { 1760 // Another case which handles logic_and from do_ClassIDIntrinsic() 1761 if (AsmOperand::is_rotated_imm(c_lo)) { 1762 __ andr(res_lo, lreg_lo, c_lo); 1763 } else { 1764 __ bic(res_lo, lreg_lo, ~c_lo); 1765 } 1766 if (res_hi != lreg_hi) { 1767 __ mov(res_hi, lreg_hi); 1768 } 1769 } else { 1770 BAILOUT("64 bit constant cannot be inlined"); 1771 } 1772 } 1773 } 1774 } 1775 1776 1777 1778 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1779 if (opr1->is_single_cpu()) { 1780 if (opr2->is_constant()) { 1781 switch (opr2->as_constant_ptr()->type()) { 1782 case T_INT: { 1783 const jint c = opr2->as_constant_ptr()->as_jint(); 1784 if (Assembler::is_arith_imm_in_range(c)) { 1785 __ cmp_32(opr1->as_register(), c); 1786 } else if (Assembler::is_arith_imm_in_range(-c)) { 1787 __ cmn_32(opr1->as_register(), -c); 1788 } else { 1789 // This can happen when compiling lookupswitch 1790 __ mov_slow(Rtemp, c); 
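// Neither c nor -c fits an ARM rotated immediate, so the constant is materialized in Rtemp and compared as a register operand.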
1791 __ cmp_32(opr1->as_register(), Rtemp);
1792 }
1793 break;
1794 }
1795 case T_OBJECT:
1796 assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
1797 __ cmp(opr1->as_register(), 0);
1798 break;
1799 case T_METADATA:
1800 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
1801 assert(opr2->as_constant_ptr()->as_metadata() == NULL, "cannot handle otherwise");
1802 __ cmp(opr1->as_register(), 0);
1803 break;
1804 default:
1805 ShouldNotReachHere();
1806 }
1807 } else if (opr2->is_single_cpu()) {
1808 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1809 assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
1810 __ cmpoop(opr1->as_register(), opr2->as_register());
1811 } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
1812 assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
1813 __ cmp(opr1->as_register(), opr2->as_register());
1814 } else {
1815 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
1816 __ cmp_32(opr1->as_register(), opr2->as_register());
1817 }
1818 } else {
1819 ShouldNotReachHere();
1820 }
1821 } else if (opr1->is_double_cpu()) {
1822 Register xlo = opr1->as_register_lo();
1823 Register xhi = opr1->as_register_hi();
1824 if (opr2->is_constant() && opr2->as_jlong() == 0) {
1825 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
1826 __ orrs(Rtemp, xlo, xhi);
1827 } else if (opr2->is_register()) {
1828 Register ylo = opr2->as_register_lo();
1829 Register yhi = opr2->as_register_hi();
1830 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1831 __ teq(xhi, yhi);
1832 __ teq(xlo, ylo, eq);
1833 } else {
1834 __ subs(Rtemp, xlo, ylo);
1835 __ sbcs(Rtemp, xhi, yhi);
1836 }
1837 } else {
1838 ShouldNotReachHere();
1839 }
1840 } else if (opr1->is_single_fpu()) {
1841 if (opr2->is_constant()) {
1842 assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
1843 __ cmp_zero_float(opr1->as_float_reg());
1844 } else {
1845 __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
1846 }
1847 } else if (opr1->is_double_fpu()) {
1848 if (opr2->is_constant()) {
1849 assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
1850 __ cmp_zero_double(opr1->as_double_reg());
1851 } else {
1852 __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
1853 }
1854 } else {
1855 ShouldNotReachHere();
1856 }
1857 }
1858
1859 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
1860 const Register res = dst->as_register();
1861 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1862 comp_op(lir_cond_unknown, left, right, op);
1863 __ fmstat();
1864 if (code == lir_ucmp_fd2i) { // unordered is less
1865 __ mvn(res, 0, lt);
1866 __ mov(res, 1, ge);
1867 } else { // unordered is greater
1868 __ mov(res, 1, cs);
1869 __ mvn(res, 0, cc);
1870 }
1871 __ mov(res, 0, eq);
1872
1873 } else {
1874 assert(code == lir_cmp_l2i, "must be");
1875
1876 Label done;
1877 const Register xlo = left->as_register_lo();
1878 const Register xhi = left->as_register_hi();
1879 const Register ylo = right->as_register_lo();
1880 const Register yhi = right->as_register_hi();
1881 __ cmp(xhi, yhi);
1882 __ mov(res, 1, gt);
1883 __ mvn(res, 0, lt);
1884 __ b(done, ne);
1885 __ subs(res, xlo, ylo);
1886 __ mov(res, 1, hi);
1887 __ mvn(res, 0, lo);
1888 __ bind(done);
1889 }
1890 }
1891
1892
1893 void LIR_Assembler::align_call(LIR_Code code) {
1894 // Not needed
1895 }
1896
1897
1898 void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
1899 int ret_addr_offset = __ patchable_call(op->addr(), rtype);
1900 assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
1901 add_call_info_here(op->info());
1902 }
1903
1904
1905 void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
1906 bool near_range = __ cache_fully_reachable();
1907 address oop_address = pc();
1908
1909 bool use_movw = VM_Version::supports_movw();
1910
1911 // Ricklass may contain something that is not a metadata pointer so
1912 // mov_metadata can't be used
1913 InlinedAddress value((address)Universe::non_oop_word());
1914 InlinedAddress addr(op->addr());
1915 if (use_movw) {
1916 __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
1917 __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
1918 } else {
1919 // No movw/movt: the value must be loaded pc-relative, but there is no
1920 // relocation, so there is no metadata table to load it from.
1921 // Use a b instruction rather than a bl, inline constant after the
1922 // branch, use a PC relative ldr to load the constant, arrange for
1923 // the call to return after the constant(s).
1924 __ ldr_literal(Ricklass, value);
1925 }
1926 __ relocate(virtual_call_Relocation::spec(oop_address));
1927 if (near_range && use_movw) {
1928 __ bl(op->addr());
1929 } else {
1930 Label call_return;
1931 __ adr(LR, call_return);
1932 if (near_range) {
1933 __ b(op->addr());
1934 } else {
1935 __ indirect_jump(addr, Rtemp);
1936 __ bind_literal(addr);
1937 }
1938 if (!use_movw) {
1939 __ bind_literal(value);
1940 }
1941 __ bind(call_return);
1942 }
1943 add_call_info(code_offset(), op->info());
1944 }
1945
1946 void LIR_Assembler::emit_static_call_stub() {
1947 address call_pc = __ pc();
1948 address stub = __ start_a_stub(call_stub_size());
1949 if (stub == NULL) {
1950 BAILOUT("static call stub overflow");
1951 }
1952
1953 DEBUG_ONLY(int offset = code_offset();)
1954
1955 InlinedMetadata metadata_literal(NULL);
1956 __ relocate(static_stub_Relocation::spec(call_pc));
1957 // If not a single instruction, NativeMovConstReg::next_instruction_address()
1958 // must jump over the whole following ldr_literal.
1959 // (See CompiledStaticCall::set_to_interpreted())
1960 #ifdef ASSERT
1961 address ldr_site = __ pc();
1962 #endif
1963 __ ldr_literal(Rmethod, metadata_literal);
1964 assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
1965 bool near_range = __ cache_fully_reachable();
1966 InlinedAddress dest((address)-1);
1967 if (near_range) {
1968 address branch_site = __ pc();
1969 __ b(branch_site); // b to self maps to special NativeJump -1 destination
1970 } else {
1971 __ indirect_jump(dest, Rtemp);
1972 }
1973 __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
1974 if (!near_range) {
1975 __ bind_literal(dest); // special NativeJump -1 destination
1976 }
1977
1978 assert(code_offset() - offset <= call_stub_size(), "overflow");
1979 __ end_a_stub();
1980 }
1981
1982 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1983 assert(exceptionOop->as_register() == Rexception_obj, "must match");
1984 assert(exceptionPC->as_register() == Rexception_pc, "must match");
1985 info->add_register_oop(exceptionOop);
1986
1987 Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
1988 Runtime1::handle_exception_id : 1989 Runtime1::handle_exception_nofpu_id; 1990 Label return_address; 1991 __ adr(Rexception_pc, return_address); 1992 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); 1993 __ bind(return_address); 1994 add_call_info_here(info); // for exception handler 1995 } 1996 1997 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1998 assert(exceptionOop->as_register() == Rexception_obj, "must match"); 1999 __ b(_unwind_handler_entry); 2000 } 2001 2002 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2003 AsmShift shift = lsl; 2004 switch (code) { 2005 case lir_shl: shift = lsl; break; 2006 case lir_shr: shift = asr; break; 2007 case lir_ushr: shift = lsr; break; 2008 default: ShouldNotReachHere(); 2009 } 2010 2011 if (dest->is_single_cpu()) { 2012 __ andr(Rtemp, count->as_register(), 31); 2013 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp)); 2014 } else if (dest->is_double_cpu()) { 2015 Register dest_lo = dest->as_register_lo(); 2016 Register dest_hi = dest->as_register_hi(); 2017 Register src_lo = left->as_register_lo(); 2018 Register src_hi = left->as_register_hi(); 2019 Register Rcount = count->as_register(); 2020 // Resolve possible register conflicts 2021 if (shift == lsl && dest_hi == src_lo) { 2022 dest_hi = Rtemp; 2023 } else if (shift != lsl && dest_lo == src_hi) { 2024 dest_lo = Rtemp; 2025 } else if (dest_lo == src_lo && dest_hi == src_hi) { 2026 dest_lo = Rtemp; 2027 } else if (dest_lo == Rcount || dest_hi == Rcount) { 2028 Rcount = Rtemp; 2029 } 2030 __ andr(Rcount, count->as_register(), 63); 2031 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount); 2032 move_regs(dest_lo, dest->as_register_lo()); 2033 move_regs(dest_hi, dest->as_register_hi()); 2034 } else { 2035 ShouldNotReachHere(); 2036 } 2037 } 2038 2039 2040 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2041 AsmShift shift = lsl; 2042 switch (code) { 2043 case lir_shl: shift = lsl; break; 2044 case lir_shr: shift = asr; break; 2045 case lir_ushr: shift = lsr; break; 2046 default: ShouldNotReachHere(); 2047 } 2048 2049 if (dest->is_single_cpu()) { 2050 count &= 31; 2051 if (count != 0) { 2052 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count)); 2053 } else { 2054 move_regs(left->as_register(), dest->as_register()); 2055 } 2056 } else if (dest->is_double_cpu()) { 2057 count &= 63; 2058 if (count != 0) { 2059 Register dest_lo = dest->as_register_lo(); 2060 Register dest_hi = dest->as_register_hi(); 2061 Register src_lo = left->as_register_lo(); 2062 Register src_hi = left->as_register_hi(); 2063 // Resolve possible register conflicts 2064 if (shift == lsl && dest_hi == src_lo) { 2065 dest_hi = Rtemp; 2066 } else if (shift != lsl && dest_lo == src_hi) { 2067 dest_lo = Rtemp; 2068 } 2069 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count); 2070 move_regs(dest_lo, dest->as_register_lo()); 2071 move_regs(dest_hi, dest->as_register_hi()); 2072 } else { 2073 __ long_move(dest->as_register_lo(), dest->as_register_hi(), 2074 left->as_register_lo(), left->as_register_hi()); 2075 } 2076 } else { 2077 ShouldNotReachHere(); 2078 } 2079 } 2080 2081 2082 // Saves 4 given registers in reserved argument area. 
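// A single stmia stores them to consecutive words starting at SP, the bottom of the reserved
// argument area; restore_from_reserved_area() below reloads them from the same slots.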
2083 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2084 verify_reserved_argument_area_size(4); 2085 __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4)); 2086 } 2087 2088 // Restores 4 given registers from reserved argument area. 2089 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2090 __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback); 2091 } 2092 2093 2094 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2095 ciArrayKlass* default_type = op->expected_type(); 2096 Register src = op->src()->as_register(); 2097 Register src_pos = op->src_pos()->as_register(); 2098 Register dst = op->dst()->as_register(); 2099 Register dst_pos = op->dst_pos()->as_register(); 2100 Register length = op->length()->as_register(); 2101 Register tmp = op->tmp()->as_register(); 2102 Register tmp2 = Rtemp; 2103 2104 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption"); 2105 2106 CodeStub* stub = op->stub(); 2107 2108 int flags = op->flags(); 2109 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2110 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2111 2112 // If we don't know anything or it's an object array, just go through the generic arraycopy 2113 if (default_type == NULL) { 2114 2115 // save arguments, because they will be killed by a runtime call 2116 save_in_reserved_area(R0, R1, R2, R3); 2117 2118 // pass length argument on SP[0] 2119 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment 2120 2121 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2122 assert(copyfunc_addr != NULL, "generic arraycopy stub required"); 2123 #ifndef PRODUCT 2124 if (PrintC1Statistics) { 2125 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); 2126 } 2127 #endif // !PRODUCT 2128 // the stub is in the code cache so close enough 2129 __ call(copyfunc_addr, relocInfo::runtime_call_type); 2130 2131 __ add(SP, SP, 2*wordSize); 2132 2133 __ cbz_32(R0, *stub->continuation()); 2134 2135 __ mvn_32(tmp, R0); 2136 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only 2137 __ sub_32(length, length, tmp); 2138 __ add_32(src_pos, src_pos, tmp); 2139 __ add_32(dst_pos, dst_pos, tmp); 2140 2141 __ b(*stub->entry()); 2142 2143 __ bind(*stub->continuation()); 2144 return; 2145 } 2146 2147 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), 2148 "must be true at this point"); 2149 int elem_size = type2aelembytes(basic_type); 2150 int shift = exact_log2(elem_size); 2151 2152 // Check for NULL 2153 if (flags & LIR_OpArrayCopy::src_null_check) { 2154 if (flags & LIR_OpArrayCopy::dst_null_check) { 2155 __ cmp(src, 0); 2156 __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed 2157 __ b(*stub->entry(), eq); 2158 } else { 2159 __ cbz(src, *stub->entry()); 2160 } 2161 } else if (flags & LIR_OpArrayCopy::dst_null_check) { 2162 __ cbz(dst, *stub->entry()); 2163 } 2164 2165 // If the compiler was not able to prove that exact type of the source or the destination 2166 // of the arraycopy is an array type, check at runtime if the source or the destination is 2167 // an instance type. 
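// Array klasses have negative layout helpers, so a layout helper that compares greater than or
// equal to Klass::_lh_neutral_value identifies a non-array and sends us to the slow path stub.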
2168 if (flags & LIR_OpArrayCopy::type_check) {
2169 if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2170 __ load_klass(tmp, dst);
2171 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2172 __ mov_slow(tmp, Klass::_lh_neutral_value);
2173 __ cmp_32(tmp2, tmp);
2174 __ b(*stub->entry(), ge);
2175 }
2176
2177 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2178 __ load_klass(tmp, src);
2179 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2180 __ mov_slow(tmp, Klass::_lh_neutral_value);
2181 __ cmp_32(tmp2, tmp);
2182 __ b(*stub->entry(), ge);
2183 }
2184 }
2185
2186 // Check if negative
2187 const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
2188 LIR_OpArrayCopy::dst_pos_positive_check |
2189 LIR_OpArrayCopy::length_positive_check;
2190 switch (flags & all_positive_checks) {
2191 case LIR_OpArrayCopy::src_pos_positive_check:
2192 __ branch_if_negative_32(src_pos, *stub->entry());
2193 break;
2194 case LIR_OpArrayCopy::dst_pos_positive_check:
2195 __ branch_if_negative_32(dst_pos, *stub->entry());
2196 break;
2197 case LIR_OpArrayCopy::length_positive_check:
2198 __ branch_if_negative_32(length, *stub->entry());
2199 break;
2200 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check:
2201 __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry());
2202 break;
2203 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2204 __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry());
2205 break;
2206 case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
2207 __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry());
2208 break;
2209 case all_positive_checks:
2210 __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry());
2211 break;
2212 default:
2213 assert((flags & all_positive_checks) == 0, "the last option");
2214 }
2215
2216 // Range checks
2217 if (flags & LIR_OpArrayCopy::src_range_check) {
2218 __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes()));
2219 __ add_32(tmp, src_pos, length);
2220 __ cmp_32(tmp, tmp2);
2221 __ b(*stub->entry(), hi);
2222 }
2223 if (flags & LIR_OpArrayCopy::dst_range_check) {
2224 __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2225 __ add_32(tmp, dst_pos, length);
2226 __ cmp_32(tmp, tmp2);
2227 __ b(*stub->entry(), hi);
2228 }
2229
2230 // Check if src and dst are of the same type
2231 if (flags & LIR_OpArrayCopy::type_check) {
2232 // We don't know the array types are compatible
2233 if (basic_type != T_OBJECT) {
2234 // Simple test for basic type arrays
2235 if (UseCompressedClassPointers) {
2236 // We don't need to decode because we just need to compare
2237 __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
2238 __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
2239 __ cmp_32(tmp, tmp2);
2240 } else {
2241 __ load_klass(tmp, src);
2242 __ load_klass(tmp2, dst);
2243 __ cmp(tmp, tmp2);
2244 }
2245 __ b(*stub->entry(), ne);
2246 } else {
2247 // For object arrays, if src is a sub class of dst then we can
2248 // safely do the copy.
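// check_klass_subtype_fast_path() decides the easy cases inline; otherwise the
// slow_subtype_check_id stub gives the definitive answer, and only when that fails
// (and a checkcast_arraycopy stub is available) do we fall back to an element-by-element
// checked copy.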
2249 Label cont, slow;
2250
2251 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2252
2253 __ load_klass(tmp, src);
2254 __ load_klass(tmp2, dst);
2255
2256 // We are at a call so all live registers are saved before we
2257 // get here
2258 assert_different_registers(tmp, tmp2, R6, altFP_7_11);
2259
2260 __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
2261
2262 __ mov(R6, R0);
2263 __ mov(altFP_7_11, R1);
2264 __ mov(R0, tmp);
2265 __ mov(R1, tmp2);
2266 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
2267 __ cmp_32(R0, 0);
2268 __ mov(R0, R6);
2269 __ mov(R1, altFP_7_11);
2270
2271 if (copyfunc_addr != NULL) { // use stub if available
2272 // src is not a sub class of dst so we have to do a
2273 // per-element check.
2274
2275 __ b(cont, ne);
2276
2277 __ bind(slow);
2278
2279 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2280 if ((flags & mask) != mask) {
2281 // One of the two types is statically known to be an object array; check at
2282 // runtime that the other one is an object array too.
2282 assert(flags & mask, "one of the two should be known to be an object array");
2283
2284 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2285 __ load_klass(tmp, src);
2286 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2287 __ load_klass(tmp, dst);
2288 }
2289 int lh_offset = in_bytes(Klass::layout_helper_offset());
2290
2291 __ ldr_u32(tmp2, Address(tmp, lh_offset));
2292
2293 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2294 __ mov_slow(tmp, objArray_lh);
2295 __ cmp_32(tmp, tmp2);
2296 __ b(*stub->entry(), ne);
2297 }
2298
2299 save_in_reserved_area(R0, R1, R2, R3);
2300
2301 Register src_ptr = R0;
2302 Register dst_ptr = R1;
2303 Register len = R2;
2304 Register chk_off = R3;
2305 Register super_k = tmp;
2306
2307 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2308 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
2309
2310 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2311 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
2312 __ load_klass(tmp, dst);
2313
2314 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2315 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2316
2317 __ ldr(super_k, Address(tmp, ek_offset));
2318
2319 __ mov(len, length);
2320 __ ldr_u32(chk_off, Address(super_k, sco_offset));
2321 __ push(super_k);
2322
2323 __ call(copyfunc_addr, relocInfo::runtime_call_type);
2324
2325 #ifndef PRODUCT
2326 if (PrintC1Statistics) {
2327 Label failed;
2328 __ cbnz_32(R0, failed);
2329 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
2330 __ bind(failed);
2331 }
2332 #endif // PRODUCT
2333
2334 __ add(SP, SP, wordSize); // Drop super_k argument
2335
2336 __ cbz_32(R0, *stub->continuation());
2337 __ mvn_32(tmp, R0);
2338
2339 // load saved arguments in slow case only
2340 restore_from_reserved_area(R0, R1, R2, R3);
2341
2342 __ sub_32(length, length, tmp);
2343 __ add_32(src_pos, src_pos, tmp);
2344 __ add_32(dst_pos, dst_pos, tmp);
2345
2346 #ifndef PRODUCT
2347 if (PrintC1Statistics) {
2348 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
2349 }
2350 #endif
2351
2352 __ b(*stub->entry());
2353
2354 __ bind(cont);
2355 } else {
2356 __ b(*stub->entry(), eq);
2357 __ bind(cont);
2358 }
2359 }
2360 }
2361
2362 #ifndef PRODUCT
2363 if (PrintC1Statistics) {
2364 address counter =
Runtime1::arraycopy_count_address(basic_type); 2365 __ inc_counter(counter, tmp, tmp2); 2366 } 2367 #endif // !PRODUCT 2368 2369 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2370 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2371 const char *name; 2372 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2373 2374 Register src_ptr = R0; 2375 Register dst_ptr = R1; 2376 Register len = R2; 2377 2378 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2379 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift); 2380 2381 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2382 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift); 2383 2384 __ mov(len, length); 2385 2386 __ call(entry, relocInfo::runtime_call_type); 2387 2388 __ bind(*stub->continuation()); 2389 } 2390 2391 #ifdef ASSERT 2392 // emit run-time assertion 2393 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2394 assert(op->code() == lir_assert, "must be"); 2395 2396 if (op->in_opr1()->is_valid()) { 2397 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2398 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2399 } else { 2400 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2401 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2402 } 2403 2404 Label ok; 2405 if (op->condition() != lir_cond_always) { 2406 AsmCondition acond = al; 2407 switch (op->condition()) { 2408 case lir_cond_equal: acond = eq; break; 2409 case lir_cond_notEqual: acond = ne; break; 2410 case lir_cond_less: acond = lt; break; 2411 case lir_cond_lessEqual: acond = le; break; 2412 case lir_cond_greaterEqual: acond = ge; break; 2413 case lir_cond_greater: acond = gt; break; 2414 case lir_cond_aboveEqual: acond = hs; break; 2415 case lir_cond_belowEqual: acond = ls; break; 2416 default: ShouldNotReachHere(); 2417 } 2418 __ b(ok, acond); 2419 } 2420 if (op->halt()) { 2421 const char* str = __ code_string(op->msg()); 2422 __ stop(str); 2423 } else { 2424 breakpoint(); 2425 } 2426 __ bind(ok); 2427 } 2428 #endif // ASSERT 2429 2430 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2431 fatal("CRC32 intrinsic is not implemented on this platform"); 2432 } 2433 2434 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2435 Register obj = op->obj_opr()->as_pointer_register(); 2436 Register hdr = op->hdr_opr()->as_pointer_register(); 2437 Register lock = op->lock_opr()->as_pointer_register(); 2438 Register tmp = op->scratch_opr()->is_illegal() ? 
noreg : 2439 op->scratch_opr()->as_pointer_register(); 2440 2441 if (!UseFastLocking) { 2442 __ b(*op->stub()->entry()); 2443 } else if (op->code() == lir_lock) { 2444 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2445 int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); 2446 if (op->info() != NULL) { 2447 add_debug_info_for_null_check(null_check_offset, op->info()); 2448 } 2449 } else if (op->code() == lir_unlock) { 2450 __ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry()); 2451 } else { 2452 ShouldNotReachHere(); 2453 } 2454 __ bind(*op->stub()->continuation()); 2455 } 2456 2457 2458 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2459 ciMethod* method = op->profiled_method(); 2460 int bci = op->profiled_bci(); 2461 ciMethod* callee = op->profiled_callee(); 2462 2463 // Update counter for all call types 2464 ciMethodData* md = method->method_data_or_null(); 2465 assert(md != NULL, "Sanity"); 2466 ciProfileData* data = md->bci_to_data(bci); 2467 assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); 2468 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2469 Register mdo = op->mdo()->as_register(); 2470 assert(op->tmp1()->is_register(), "tmp1 must be allocated"); 2471 Register tmp1 = op->tmp1()->as_pointer_register(); 2472 assert_different_registers(mdo, tmp1); 2473 __ mov_metadata(mdo, md->constant_encoding()); 2474 int mdo_offset_bias = 0; 2475 int max_offset = 4096; 2476 if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) { 2477 // The offset is large so bias the mdo by the base of the slot so 2478 // that the ldr can use an immediate offset to reference the slots of the data 2479 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2480 __ mov_slow(tmp1, mdo_offset_bias); 2481 __ add(mdo, mdo, tmp1); 2482 } 2483 2484 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2485 // Perform additional virtual call profiling for invokevirtual and 2486 // invokeinterface bytecodes 2487 if (op->should_profile_receiver_type()) { 2488 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2489 Register recv = op->recv()->as_register(); 2490 assert_different_registers(mdo, tmp1, recv); 2491 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2492 ciKlass* known_klass = op->known_holder(); 2493 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2494 // We know the type that will be seen at this call site; we can 2495 // statically update the MethodData* rather than needing to do 2496 // dynamic tests on the receiver type 2497 2498 // NOTE: we should probably put a lock around this search to 2499 // avoid collisions by concurrent compilations 2500 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2501 uint i; 2502 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2503 ciKlass* receiver = vc_data->receiver(i); 2504 if (known_klass->equals(receiver)) { 2505 Address data_addr(mdo, md->byte_offset_of_slot(data, 2506 VirtualCallData::receiver_count_offset(i)) - 2507 mdo_offset_bias); 2508 __ ldr(tmp1, data_addr); 2509 __ add(tmp1, tmp1, DataLayout::counter_increment); 2510 __ str(tmp1, data_addr); 2511 return; 2512 } 2513 } 2514 2515 // Receiver type not found in profile data; select an empty slot 2516 2517 // Note that this is less efficient than it should be because it 2518 // 
always does a write to the receiver part of the 2519 // VirtualCallData rather than just the first time 2520 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2521 ciKlass* receiver = vc_data->receiver(i); 2522 if (receiver == NULL) { 2523 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 2524 mdo_offset_bias); 2525 __ mov_metadata(tmp1, known_klass->constant_encoding()); 2526 __ str(tmp1, recv_addr); 2527 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 2528 mdo_offset_bias); 2529 __ ldr(tmp1, data_addr); 2530 __ add(tmp1, tmp1, DataLayout::counter_increment); 2531 __ str(tmp1, data_addr); 2532 return; 2533 } 2534 } 2535 } else { 2536 __ load_klass(recv, recv); 2537 Label update_done; 2538 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2539 // Receiver did not match any saved receiver and there is no empty row for it. 2540 // Increment total counter to indicate polymorphic case. 2541 __ ldr(tmp1, counter_addr); 2542 __ add(tmp1, tmp1, DataLayout::counter_increment); 2543 __ str(tmp1, counter_addr); 2544 2545 __ bind(update_done); 2546 } 2547 } else { 2548 // Static call 2549 __ ldr(tmp1, counter_addr); 2550 __ add(tmp1, tmp1, DataLayout::counter_increment); 2551 __ str(tmp1, counter_addr); 2552 } 2553 } 2554 2555 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2556 fatal("Type profiling not implemented on this platform"); 2557 } 2558 2559 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2560 Unimplemented(); 2561 } 2562 2563 2564 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2565 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2566 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp()); 2567 } 2568 2569 2570 void LIR_Assembler::align_backward_branch_target() { 2571 // Some ARM processors do better with 8-byte branch target alignment 2572 __ align(8); 2573 } 2574 2575 2576 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2577 // tmp must be unused 2578 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2579 2580 if (left->is_single_cpu()) { 2581 assert (dest->type() == T_INT, "unexpected result type"); 2582 assert (left->type() == T_INT, "unexpected left type"); 2583 __ neg_32(dest->as_register(), left->as_register()); 2584 } else if (left->is_double_cpu()) { 2585 Register dest_lo = dest->as_register_lo(); 2586 Register dest_hi = dest->as_register_hi(); 2587 Register src_lo = left->as_register_lo(); 2588 Register src_hi = left->as_register_hi(); 2589 if (dest_lo == src_hi) { 2590 dest_lo = Rtemp; 2591 } 2592 __ rsbs(dest_lo, src_lo, 0); 2593 __ rsc(dest_hi, src_hi, 0); 2594 move_regs(dest_lo, dest->as_register_lo()); 2595 } else if (left->is_single_fpu()) { 2596 __ neg_float(dest->as_float_reg(), left->as_float_reg()); 2597 } else if (left->is_double_fpu()) { 2598 __ neg_double(dest->as_double_reg(), left->as_double_reg()); 2599 } else { 2600 ShouldNotReachHere(); 2601 } 2602 } 2603 2604 2605 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2606 assert(patch_code == lir_patch_none, "Patch code not supported"); 2607 LIR_Address* addr = addr_opr->as_address_ptr(); 2608 if (addr->index()->is_illegal()) { 2609 jint c = addr->disp(); 2610 if (!Assembler::is_arith_imm_in_range(c)) { 2611 BAILOUT("illegal arithmetic operand"); 2612 } 2613 __ add(dest->as_pointer_register(), 
addr->base()->as_pointer_register(), c); 2614 } else { 2615 assert(addr->disp() == 0, "cannot handle otherwise"); 2616 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), 2617 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale())); 2618 } 2619 } 2620 2621 2622 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2623 assert(!tmp->is_valid(), "don't need temporary"); 2624 __ call(dest); 2625 if (info != NULL) { 2626 add_call_info_here(info); 2627 } 2628 } 2629 2630 2631 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2632 assert(src->is_double_cpu() && dest->is_address() || 2633 src->is_address() && dest->is_double_cpu(), 2634 "Simple move_op is called for all other cases"); 2635 2636 int null_check_offset; 2637 if (dest->is_address()) { 2638 // Store 2639 const LIR_Address* addr = dest->as_address_ptr(); 2640 const Register src_lo = src->as_register_lo(); 2641 const Register src_hi = src->as_register_hi(); 2642 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2643 2644 if (src_lo < src_hi) { 2645 null_check_offset = __ offset(); 2646 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi)); 2647 } else { 2648 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2649 __ mov(Rtemp, src_hi); 2650 null_check_offset = __ offset(); 2651 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp)); 2652 } 2653 } else { 2654 // Load 2655 const LIR_Address* addr = src->as_address_ptr(); 2656 const Register dest_lo = dest->as_register_lo(); 2657 const Register dest_hi = dest->as_register_hi(); 2658 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2659 2660 null_check_offset = __ offset(); 2661 if (dest_lo < dest_hi) { 2662 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi)); 2663 } else { 2664 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2665 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp)); 2666 __ mov(dest_hi, Rtemp); 2667 } 2668 } 2669 2670 if (info != NULL) { 2671 add_debug_info_for_null_check(null_check_offset, info); 2672 } 2673 } 2674 2675 2676 void LIR_Assembler::membar() { 2677 __ membar(MacroAssembler::StoreLoad, Rtemp); 2678 } 2679 2680 void LIR_Assembler::membar_acquire() { 2681 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 2682 } 2683 2684 void LIR_Assembler::membar_release() { 2685 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 2686 } 2687 2688 void LIR_Assembler::membar_loadload() { 2689 __ membar(MacroAssembler::LoadLoad, Rtemp); 2690 } 2691 2692 void LIR_Assembler::membar_storestore() { 2693 __ membar(MacroAssembler::StoreStore, Rtemp); 2694 } 2695 2696 void LIR_Assembler::membar_loadstore() { 2697 __ membar(MacroAssembler::LoadStore, Rtemp); 2698 } 2699 2700 void LIR_Assembler::membar_storeload() { 2701 __ membar(MacroAssembler::StoreLoad, Rtemp); 2702 } 2703 2704 void LIR_Assembler::on_spin_wait() { 2705 Unimplemented(); 2706 } 2707 2708 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2709 // Not used on ARM 2710 Unimplemented(); 2711 } 2712 2713 void LIR_Assembler::peephole(LIR_List* lir) { 2714 LIR_OpList* inst = lir->instructions_list(); 2715 const int inst_length = 
inst->length();
2716 for (int i = 0; i < inst_length; i++) {
2717 LIR_Op* op = inst->at(i);
2718 switch (op->code()) {
2719 case lir_cmp: {
2720 // Replace:
2721 // cmp rX, y
2722 // cmove [EQ] y, z, rX
2723 // with
2724 // cmp rX, y
2725 // cmove [EQ] illegalOpr, z, rX
2726 //
2727 // or
2728 // cmp rX, y
2729 // cmove [NE] z, y, rX
2730 // with
2731 // cmp rX, y
2732 // cmove [NE] z, illegalOpr, rX
2733 //
2734 // moves from illegalOpr should be removed when converting LIR to native assembly
2735
2736 LIR_Op2* cmp = op->as_Op2();
2737 assert(cmp != NULL, "cmp LIR instruction is not an op2");
2738
2739 if (i + 1 < inst_length) {
2740 LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
2741 if (cmove != NULL && cmove->code() == lir_cmove) {
2742 LIR_Opr cmove_res = cmove->result_opr();
2743 bool res_is_op1 = cmove_res == cmp->in_opr1();
2744 bool res_is_op2 = cmove_res == cmp->in_opr2();
2745 LIR_Opr cmp_res, cmp_arg;
2746 if (res_is_op1) {
2747 cmp_res = cmp->in_opr1();
2748 cmp_arg = cmp->in_opr2();
2749 } else if (res_is_op2) {
2750 cmp_res = cmp->in_opr2();
2751 cmp_arg = cmp->in_opr1();
2752 } else {
2753 cmp_res = LIR_OprFact::illegalOpr;
2754 cmp_arg = LIR_OprFact::illegalOpr;
2755 }
2756
2757 if (cmp_res != LIR_OprFact::illegalOpr) {
2758 LIR_Condition cond = cmove->condition();
2759 if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
2760 cmove->set_in_opr1(LIR_OprFact::illegalOpr);
2761 } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
2762 cmove->set_in_opr2(LIR_OprFact::illegalOpr);
2763 }
2764 }
2765 }
2766 }
2767 break;
2768 }
2769
2770 default:
2771 break;
2772 }
2773 }
2774 }
2775
2776 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2777 assert(src->is_address(), "sanity");
2778 Address addr = as_Address(src->as_address_ptr());
2779
2780 if (code == lir_xchg) {
2781 } else {
2782 assert (!data->is_oop(), "xadd for oops");
2783 }
2784
2785 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2786
2787 Label retry;
2788 __ bind(retry);
2789
2790 if (data->type() == T_INT || data->is_oop()) {
2791 Register dst = dest->as_register();
2792 Register new_val = noreg;
2793 __ ldrex(dst, addr);
2794 if (code == lir_xadd) {
2795 Register tmp_reg = tmp->as_register();
2796 if (data->is_constant()) {
2797 assert_different_registers(dst, tmp_reg);
2798 __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
2799 } else {
2800 assert_different_registers(dst, tmp_reg, data->as_register());
2801 __ add_32(tmp_reg, dst, data->as_register());
2802 }
2803 new_val = tmp_reg;
2804 } else {
2805 if (UseCompressedOops && data->is_oop()) {
2806 new_val = tmp->as_pointer_register();
2807 } else {
2808 new_val = data->as_register();
2809 }
2810 assert_different_registers(dst, new_val);
2811 }
2812 __ strex(Rtemp, new_val, addr);
2813
2814 } else if (data->type() == T_LONG) {
2815 Register dst_lo = dest->as_register_lo();
2816 Register new_val_lo = noreg;
2817 Register dst_hi = dest->as_register_hi();
2818
2819 assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
2820 assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
2821
2823 __ ldrexd(dst_lo, addr);
2824 if (code == lir_xadd) {
2825 Register tmp_lo = tmp->as_register_lo();
2826 Register tmp_hi = tmp->as_register_hi();
2827
2828 assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
2829 assert((tmp_lo->encoding() & 0x1)
== 0, "misaligned register pair"); 2830 2831 if (data->is_constant()) { 2832 jlong c = data->as_constant_ptr()->as_jlong(); 2833 assert((jlong)((jint)c) == c, "overflow"); 2834 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi); 2835 __ adds(tmp_lo, dst_lo, (jint)c); 2836 __ adc(tmp_hi, dst_hi, 0); 2837 } else { 2838 Register new_val_lo = data->as_register_lo(); 2839 Register new_val_hi = data->as_register_hi(); 2840 __ adds(tmp_lo, dst_lo, new_val_lo); 2841 __ adc(tmp_hi, dst_hi, new_val_hi); 2842 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi); 2843 } 2844 new_val_lo = tmp_lo; 2845 } else { 2846 new_val_lo = data->as_register_lo(); 2847 Register new_val_hi = data->as_register_hi(); 2848 2849 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi); 2850 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair"); 2851 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair"); 2852 } 2853 __ strexd(Rtemp, new_val_lo, addr); 2854 } else { 2855 ShouldNotReachHere(); 2856 } 2857 2858 __ cbnz_32(Rtemp, retry); 2859 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp); 2860 2861 } 2862 2863 #undef __