/*
 * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();


  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ?
           Address(base, addr->index()->as_register(), lsl, scale) :
           Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    if (slot_offset >= 4096 - BytesPerWord) {
      __ add_slow(R2, OSR_buf, slot_offset);
      __ ldr(R1, Address(R2, 0*BytesPerWord));
      __ ldr(R2, Address(R2, 1*BytesPerWord));
    } else {
      __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
      __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    }
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}


int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  int offset = __ offset();
  __ get_polling_page(Rtemp);
  __ relocate(relocInfo::poll_type);
  add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
  __ ldr(Rtemp, Address(Rtemp));

  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr),"cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != nullptr) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
    frame_map()->address_for_slot(dest->single_stack_ix()) :
    frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd str
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
    frame_map()->address_for_slot(src->single_stack_ix()) :
    frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          __ nop(); // see comment before patching_epilog for 2nd ldr
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    // Offset embedded into LDR/STR instruction may appear not enough
    // to address a field. So, provide a space for one more instruction
    // that will deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2i_exact(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >>> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
  assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert (op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
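    // Note (added): each row of the ReceiverTypeData holds a (receiver klass, count)
    // pair; matching rows get their counter bumped by DataLayout::counter_increment,
    // and control then jumps to update_done.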
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != nullptr,       "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
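// If obj was null, the helper below records the null_seen flag in the method data
// and branches to obj_is_null; otherwise it falls through at not_null with md, data
// and mdo_offset_bias set up for the caller.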
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != nullptr, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;


      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  //   if (*addr == cmpval) {
  //     *addr = newval;
  //     dest = 1;
  //   } else {
  //     dest = 0;
  //   }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_Opr::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
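  // Note (added): this trailing barrier pairs with the StoreStore|LoadStore barrier
  // emitted before the CAS above, ordering the CAS store before subsequent loads and stores.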
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

  for (;;) {                         // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();

      switch (c->type()) {
        case T_INT:
          __ mov_slow(result->as_register(), c->as_jint(), acond);
          break;
        case T_LONG:
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
          break;
        case T_OBJECT:
          __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
          break;
        case T_FLOAT:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register(), c->as_jint(), acond);
#else
          __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
#endif // __SOFTFP__
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        case T_METADATA:
          __ mov_metadata(result->as_register(), c->as_metadata(), acond);
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == nullptr, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default:
ShouldNotReachHere(); 1575 } 1576 } 1577 1578 } else if (dest->is_double_cpu()) { 1579 Register res_lo = dest->as_register_lo(); 1580 Register res_hi = dest->as_register_hi(); 1581 Register lreg_lo = left->as_register_lo(); 1582 Register lreg_hi = left->as_register_hi(); 1583 if (right->is_double_cpu()) { 1584 Register rreg_lo = right->as_register_lo(); 1585 Register rreg_hi = right->as_register_hi(); 1586 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1587 res_lo = Rtemp; 1588 } 1589 switch (code) { 1590 case lir_add: 1591 __ adds(res_lo, lreg_lo, rreg_lo); 1592 __ adc(res_hi, lreg_hi, rreg_hi); 1593 break; 1594 case lir_sub: 1595 __ subs(res_lo, lreg_lo, rreg_lo); 1596 __ sbc(res_hi, lreg_hi, rreg_hi); 1597 break; 1598 default: 1599 ShouldNotReachHere(); 1600 } 1601 } else { 1602 assert(right->is_constant(), "must be"); 1603 assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range"); 1604 const jint c = (jint) right->as_constant_ptr()->as_jlong(); 1605 if (res_lo == lreg_hi) { 1606 res_lo = Rtemp; 1607 } 1608 switch (code) { 1609 case lir_add: 1610 __ adds(res_lo, lreg_lo, c); 1611 __ adc(res_hi, lreg_hi, 0); 1612 break; 1613 case lir_sub: 1614 __ subs(res_lo, lreg_lo, c); 1615 __ sbc(res_hi, lreg_hi, 0); 1616 break; 1617 default: 1618 ShouldNotReachHere(); 1619 } 1620 } 1621 move_regs(res_lo, dest->as_register_lo()); 1622 1623 } else if (dest->is_single_fpu()) { 1624 assert(left->is_single_fpu(), "must be"); 1625 assert(right->is_single_fpu(), "must be"); 1626 const FloatRegister res = dest->as_float_reg(); 1627 const FloatRegister lreg = left->as_float_reg(); 1628 const FloatRegister rreg = right->as_float_reg(); 1629 switch (code) { 1630 case lir_add: __ add_float(res, lreg, rreg); break; 1631 case lir_sub: __ sub_float(res, lreg, rreg); break; 1632 case lir_mul: __ mul_float(res, lreg, rreg); break; 1633 case lir_div: __ div_float(res, lreg, rreg); break; 1634 default: ShouldNotReachHere(); 1635 } 1636 } else if (dest->is_double_fpu()) { 1637 assert(left->is_double_fpu(), "must be"); 1638 assert(right->is_double_fpu(), "must be"); 1639 const FloatRegister res = dest->as_double_reg(); 1640 const FloatRegister lreg = left->as_double_reg(); 1641 const FloatRegister rreg = right->as_double_reg(); 1642 switch (code) { 1643 case lir_add: __ add_double(res, lreg, rreg); break; 1644 case lir_sub: __ sub_double(res, lreg, rreg); break; 1645 case lir_mul: __ mul_double(res, lreg, rreg); break; 1646 case lir_div: __ div_double(res, lreg, rreg); break; 1647 default: ShouldNotReachHere(); 1648 } 1649 } else { 1650 ShouldNotReachHere(); 1651 } 1652 } 1653 1654 1655 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { 1656 switch (code) { 1657 case lir_abs: 1658 __ abs_double(dest->as_double_reg(), value->as_double_reg()); 1659 break; 1660 case lir_sqrt: 1661 __ sqrt_double(dest->as_double_reg(), value->as_double_reg()); 1662 break; 1663 default: 1664 ShouldNotReachHere(); 1665 } 1666 } 1667 1668 1669 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1670 assert(dest->is_register(), "wrong items state"); 1671 assert(left->is_register(), "wrong items state"); 1672 1673 if (dest->is_single_cpu()) { 1674 1675 const Register res = dest->as_register(); 1676 const Register lreg = left->as_register(); 1677 1678 if (right->is_single_cpu()) { 1679 const Register rreg = right->as_register(); 1680 switch (code) { 1681 case lir_logic_and: __ and_32(res, lreg, rreg); break; 1682 case lir_logic_or: __ 
orr_32(res, lreg, rreg); break; 1683 case lir_logic_xor: __ eor_32(res, lreg, rreg); break; 1684 default: ShouldNotReachHere(); 1685 } 1686 } else { 1687 assert(right->is_constant(), "must be"); 1688 const uint c = (uint)right->as_constant_ptr()->as_jint(); 1689 if (!Assembler::is_arith_imm_in_range(c)) { 1690 BAILOUT("illegal arithmetic operand"); 1691 } 1692 switch (code) { 1693 case lir_logic_and: __ and_32(res, lreg, c); break; 1694 case lir_logic_or: __ orr_32(res, lreg, c); break; 1695 case lir_logic_xor: __ eor_32(res, lreg, c); break; 1696 default: ShouldNotReachHere(); 1697 } 1698 } 1699 } else { 1700 assert(dest->is_double_cpu(), "should be"); 1701 Register res_lo = dest->as_register_lo(); 1702 1703 assert (dest->type() == T_LONG, "unexpected result type"); 1704 assert (left->type() == T_LONG, "unexpected left type"); 1705 assert (right->type() == T_LONG, "unexpected right type"); 1706 1707 const Register res_hi = dest->as_register_hi(); 1708 const Register lreg_lo = left->as_register_lo(); 1709 const Register lreg_hi = left->as_register_hi(); 1710 1711 if (right->is_register()) { 1712 const Register rreg_lo = right->as_register_lo(); 1713 const Register rreg_hi = right->as_register_hi(); 1714 if (res_lo == lreg_hi || res_lo == rreg_hi) { 1715 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input 1716 } 1717 switch (code) { 1718 case lir_logic_and: 1719 __ andr(res_lo, lreg_lo, rreg_lo); 1720 __ andr(res_hi, lreg_hi, rreg_hi); 1721 break; 1722 case lir_logic_or: 1723 __ orr(res_lo, lreg_lo, rreg_lo); 1724 __ orr(res_hi, lreg_hi, rreg_hi); 1725 break; 1726 case lir_logic_xor: 1727 __ eor(res_lo, lreg_lo, rreg_lo); 1728 __ eor(res_hi, lreg_hi, rreg_hi); 1729 break; 1730 default: 1731 ShouldNotReachHere(); 1732 } 1733 move_regs(res_lo, dest->as_register_lo()); 1734 } else { 1735 assert(right->is_constant(), "must be"); 1736 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong(); 1737 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32); 1738 // Case for logic_or from do_ClassIDIntrinsic() 1739 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) { 1740 switch (code) { 1741 case lir_logic_and: 1742 __ andr(res_lo, lreg_lo, c_lo); 1743 __ mov(res_hi, 0); 1744 break; 1745 case lir_logic_or: 1746 __ orr(res_lo, lreg_lo, c_lo); 1747 break; 1748 case lir_logic_xor: 1749 __ eor(res_lo, lreg_lo, c_lo); 1750 break; 1751 default: 1752 ShouldNotReachHere(); 1753 } 1754 } else if (code == lir_logic_and && 1755 c_hi == -1 && 1756 (AsmOperand::is_rotated_imm(c_lo) || 1757 AsmOperand::is_rotated_imm(~c_lo))) { 1758 // Another case which handles logic_and from do_ClassIDIntrinsic() 1759 if (AsmOperand::is_rotated_imm(c_lo)) { 1760 __ andr(res_lo, lreg_lo, c_lo); 1761 } else { 1762 __ bic(res_lo, lreg_lo, ~c_lo); 1763 } 1764 if (res_hi != lreg_hi) { 1765 __ mov(res_hi, lreg_hi); 1766 } 1767 } else { 1768 BAILOUT("64 bit constant cannot be inlined"); 1769 } 1770 } 1771 } 1772 } 1773 1774 1775 1776 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1777 if (opr1->is_single_cpu()) { 1778 if (opr2->is_constant()) { 1779 switch (opr2->as_constant_ptr()->type()) { 1780 case T_INT: { 1781 const jint c = opr2->as_constant_ptr()->as_jint(); 1782 if (Assembler::is_arith_imm_in_range(c)) { 1783 __ cmp_32(opr1->as_register(), c); 1784 } else if (Assembler::is_arith_imm_in_range(-c)) { 1785 __ cmn_32(opr1->as_register(), -c); 1786 } else { 1787 // This can happen when compiling lookupswitch 1788 __ mov_slow(Rtemp, c); 
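          // out-of-range immediate: compare against the constant materialized in Rtemp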
          __ cmp_32(opr1->as_register(), Rtemp);
        }
        break;
      }
      case T_OBJECT:
        assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
        __ cmp(opr1->as_register(), 0);
        break;
      case T_METADATA:
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
        assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
        __ cmp(opr1->as_register(), 0);
        break;
      default:
        ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
        __ cmpoop(opr1->as_register(), opr2->as_register());
      } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        __ subs(Rtemp, xlo, ylo);
        __ sbcs(Rtemp, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    __ fmstat();
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else { // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}


void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer so
  // mov_metadata can't be used
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // Without movw/movt the value must be loaded PC-relative, but there is
    // no relocation and hence no metadata table to load it from.
    // Use a b instruction rather than a bl, inline the constant after the
    // branch, use a PC-relative ldr to load the constant, and arrange for
    // the call to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
    if (!use_movw) {
      __ bind_literal(value);
    }
    __ bind(call_return);
  }
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    BAILOUT("static call stub overflow");
  }

  DEBUG_ONLY(int offset = code_offset();)

  InlinedMetadata metadata_literal(nullptr);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // If not a single instruction, NativeMovConstReg::next_instruction_address()
  // must jump over the whole following ldr_literal.
  // (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
  address ldr_site = __ pc();
#endif
  __ ldr_literal(Rmethod, metadata_literal);
  assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  if (near_range) {
    address branch_site = __ pc();
    __ b(branch_site); // b to self maps to special NativeJump -1 destination
  } else {
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(code_offset() - offset <= call_stub_size(), "overflow");
  __ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  C1StubId handle_id = compilation()->has_fpu_code() ?
1986 C1StubId::handle_exception_id : 1987 C1StubId::handle_exception_nofpu_id; 1988 Label return_address; 1989 __ adr(Rexception_pc, return_address); 1990 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); 1991 __ bind(return_address); 1992 add_call_info_here(info); // for exception handler 1993 } 1994 1995 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1996 assert(exceptionOop->as_register() == Rexception_obj, "must match"); 1997 __ b(_unwind_handler_entry); 1998 } 1999 2000 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2001 AsmShift shift = lsl; 2002 switch (code) { 2003 case lir_shl: shift = lsl; break; 2004 case lir_shr: shift = asr; break; 2005 case lir_ushr: shift = lsr; break; 2006 default: ShouldNotReachHere(); 2007 } 2008 2009 if (dest->is_single_cpu()) { 2010 __ andr(Rtemp, count->as_register(), 31); 2011 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp)); 2012 } else if (dest->is_double_cpu()) { 2013 Register dest_lo = dest->as_register_lo(); 2014 Register dest_hi = dest->as_register_hi(); 2015 Register src_lo = left->as_register_lo(); 2016 Register src_hi = left->as_register_hi(); 2017 Register Rcount = count->as_register(); 2018 // Resolve possible register conflicts 2019 if (shift == lsl && dest_hi == src_lo) { 2020 dest_hi = Rtemp; 2021 } else if (shift != lsl && dest_lo == src_hi) { 2022 dest_lo = Rtemp; 2023 } else if (dest_lo == src_lo && dest_hi == src_hi) { 2024 dest_lo = Rtemp; 2025 } else if (dest_lo == Rcount || dest_hi == Rcount) { 2026 Rcount = Rtemp; 2027 } 2028 __ andr(Rcount, count->as_register(), 63); 2029 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount); 2030 move_regs(dest_lo, dest->as_register_lo()); 2031 move_regs(dest_hi, dest->as_register_hi()); 2032 } else { 2033 ShouldNotReachHere(); 2034 } 2035 } 2036 2037 2038 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2039 AsmShift shift = lsl; 2040 switch (code) { 2041 case lir_shl: shift = lsl; break; 2042 case lir_shr: shift = asr; break; 2043 case lir_ushr: shift = lsr; break; 2044 default: ShouldNotReachHere(); 2045 } 2046 2047 if (dest->is_single_cpu()) { 2048 count &= 31; 2049 if (count != 0) { 2050 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count)); 2051 } else { 2052 move_regs(left->as_register(), dest->as_register()); 2053 } 2054 } else if (dest->is_double_cpu()) { 2055 count &= 63; 2056 if (count != 0) { 2057 Register dest_lo = dest->as_register_lo(); 2058 Register dest_hi = dest->as_register_hi(); 2059 Register src_lo = left->as_register_lo(); 2060 Register src_hi = left->as_register_hi(); 2061 // Resolve possible register conflicts 2062 if (shift == lsl && dest_hi == src_lo) { 2063 dest_hi = Rtemp; 2064 } else if (shift != lsl && dest_lo == src_hi) { 2065 dest_lo = Rtemp; 2066 } 2067 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count); 2068 move_regs(dest_lo, dest->as_register_lo()); 2069 move_regs(dest_hi, dest->as_register_hi()); 2070 } else { 2071 __ long_move(dest->as_register_lo(), dest->as_register_hi(), 2072 left->as_register_lo(), left->as_register_hi()); 2073 } 2074 } else { 2075 ShouldNotReachHere(); 2076 } 2077 } 2078 2079 2080 // Saves 4 given registers in reserved argument area. 
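// The reserved argument area is the outgoing-argument space at the lowest
// addresses of the frame, addressed relative to SP;
// verify_reserved_argument_area_size() checks that it can hold all 4 words.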
2081 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2082 verify_reserved_argument_area_size(4); 2083 __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4)); 2084 } 2085 2086 // Restores 4 given registers from reserved argument area. 2087 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) { 2088 __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback); 2089 } 2090 2091 2092 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2093 ciArrayKlass* default_type = op->expected_type(); 2094 Register src = op->src()->as_register(); 2095 Register src_pos = op->src_pos()->as_register(); 2096 Register dst = op->dst()->as_register(); 2097 Register dst_pos = op->dst_pos()->as_register(); 2098 Register length = op->length()->as_register(); 2099 Register tmp = op->tmp()->as_register(); 2100 Register tmp2 = Rtemp; 2101 2102 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption"); 2103 2104 CodeStub* stub = op->stub(); 2105 2106 int flags = op->flags(); 2107 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 2108 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2109 2110 // If we don't know anything or it's an object array, just go through the generic arraycopy 2111 if (default_type == nullptr) { 2112 2113 // save arguments, because they will be killed by a runtime call 2114 save_in_reserved_area(R0, R1, R2, R3); 2115 2116 // pass length argument on SP[0] 2117 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment 2118 2119 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2120 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 2121 #ifndef PRODUCT 2122 if (PrintC1Statistics) { 2123 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); 2124 } 2125 #endif // !PRODUCT 2126 // the stub is in the code cache so close enough 2127 __ call(copyfunc_addr, relocInfo::runtime_call_type); 2128 2129 __ add(SP, SP, 2*wordSize); 2130 2131 __ cbz_32(R0, *stub->continuation()); 2132 2133 __ mvn_32(tmp, R0); 2134 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only 2135 __ sub_32(length, length, tmp); 2136 __ add_32(src_pos, src_pos, tmp); 2137 __ add_32(dst_pos, dst_pos, tmp); 2138 2139 __ b(*stub->entry()); 2140 2141 __ bind(*stub->continuation()); 2142 return; 2143 } 2144 2145 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), 2146 "must be true at this point"); 2147 int elem_size = type2aelembytes(basic_type); 2148 int shift = exact_log2(elem_size); 2149 2150 // Check for null 2151 if (flags & LIR_OpArrayCopy::src_null_check) { 2152 if (flags & LIR_OpArrayCopy::dst_null_check) { 2153 __ cmp(src, 0); 2154 __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed 2155 __ b(*stub->entry(), eq); 2156 } else { 2157 __ cbz(src, *stub->entry()); 2158 } 2159 } else if (flags & LIR_OpArrayCopy::dst_null_check) { 2160 __ cbz(dst, *stub->entry()); 2161 } 2162 2163 // If the compiler was not able to prove that exact type of the source or the destination 2164 // of the arraycopy is an array type, check at runtime if the source or the destination is 2165 // an instance type. 
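// Array klasses have a negative layout helper, while instance klasses have a
// layout helper >= Klass::_lh_neutral_value, so a signed compare against the
// neutral value routes instance types to the slow-path stub.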
2166 if (flags & LIR_OpArrayCopy::type_check) { 2167 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2168 __ load_klass(tmp, dst); 2169 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2170 __ mov_slow(tmp, Klass::_lh_neutral_value); 2171 __ cmp_32(tmp2, tmp); 2172 __ b(*stub->entry(), ge); 2173 } 2174 2175 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2176 __ load_klass(tmp, src); 2177 __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2178 __ mov_slow(tmp, Klass::_lh_neutral_value); 2179 __ cmp_32(tmp2, tmp); 2180 __ b(*stub->entry(), ge); 2181 } 2182 } 2183 2184 // Check if negative 2185 const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check | 2186 LIR_OpArrayCopy::dst_pos_positive_check | 2187 LIR_OpArrayCopy::length_positive_check; 2188 switch (flags & all_positive_checks) { 2189 case LIR_OpArrayCopy::src_pos_positive_check: 2190 __ branch_if_negative_32(src_pos, *stub->entry()); 2191 break; 2192 case LIR_OpArrayCopy::dst_pos_positive_check: 2193 __ branch_if_negative_32(dst_pos, *stub->entry()); 2194 break; 2195 case LIR_OpArrayCopy::length_positive_check: 2196 __ branch_if_negative_32(length, *stub->entry()); 2197 break; 2198 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check: 2199 __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry()); 2200 break; 2201 case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2202 __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry()); 2203 break; 2204 case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check: 2205 __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry()); 2206 break; 2207 case all_positive_checks: 2208 __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry()); 2209 break; 2210 default: 2211 assert((flags & all_positive_checks) == 0, "the last option"); 2212 } 2213 2214 // Range checks 2215 if (flags & LIR_OpArrayCopy::src_range_check) { 2216 __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes())); 2217 __ add_32(tmp, src_pos, length); 2218 __ cmp_32(tmp, tmp2); 2219 __ b(*stub->entry(), hi); 2220 } 2221 if (flags & LIR_OpArrayCopy::dst_range_check) { 2222 __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2223 __ add_32(tmp, dst_pos, length); 2224 __ cmp_32(tmp, tmp2); 2225 __ b(*stub->entry(), hi); 2226 } 2227 2228 // Check if src and dst are of the same type 2229 if (flags & LIR_OpArrayCopy::type_check) { 2230 // We don't know the array types are compatible 2231 if (basic_type != T_OBJECT) { 2232 // Simple test for basic type arrays 2233 if (UseCompressedClassPointers) { 2234 // We don't need decode because we just need to compare 2235 __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes())); 2236 __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); 2237 __ cmp_32(tmp, tmp2); 2238 } else { 2239 __ load_klass(tmp, src); 2240 __ load_klass(tmp2, dst); 2241 __ cmp(tmp, tmp2); 2242 } 2243 __ b(*stub->entry(), ne); 2244 } else { 2245 // For object arrays, if src is a sub class of dst then we can 2246 // safely do the copy. 
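      // Otherwise a per-element check is needed: use StubRoutines::checkcast_arraycopy()
      // when it is available, and the slow-path stub when it is not.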
      Label cont, slow;

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      __ load_klass(tmp, src);
      __ load_klass(tmp2, dst);

      // We are at a call so all live registers are saved before we
      // get here
      assert_different_registers(tmp, tmp2, R6, altFP_7_11);

      __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);

      __ mov(R6, R0);
      __ mov(altFP_7_11, R1);
      __ mov(R0, tmp);
      __ mov(R1, tmp2);
      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
      __ cmp_32(R0, 0);
      __ mov(R0, R6);
      __ mov(R1, altFP_7_11);

      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        __ b(cont, ne);

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // At least one of the two is known to be an object array;
          // check at runtime that the other one is an object array too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ ldr_u32(tmp2, Address(tmp, lh_offset));

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ mov_slow(tmp, objArray_lh);
          __ cmp_32(tmp, tmp2);
          __ b(*stub->entry(), ne);
        }

        save_in_reserved_area(R0, R1, R2, R3);

        Register src_ptr = R0;
        Register dst_ptr = R1;
        Register len = R2;
        Register chk_off = R3;
        Register super_k = tmp;

        __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

        __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
        __ load_klass(tmp, dst);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        int sco_offset = in_bytes(Klass::super_check_offset_offset());

        __ ldr(super_k, Address(tmp, ek_offset));

        __ mov(len, length);
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
        __ push(super_k);

        __ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz_32(R0, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
          __ bind(failed);
        }
#endif // PRODUCT

        __ add(SP, SP, wordSize);  // Drop super_k argument

        __ cbz_32(R0, *stub->continuation());
        __ mvn_32(tmp, R0);

        // load saved arguments in slow case only
        restore_from_reserved_area(R0, R1, R2, R3);

        __ sub_32(length, length, tmp);
        __ add_32(src_pos, src_pos, tmp);
        __ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
        }
#endif

        __ b(*stub->entry());

        __ bind(cont);
      } else {
        __ b(*stub->entry(), eq);
        __ bind(cont);
      }
    }
  }

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter =
Runtime1::arraycopy_count_address(basic_type); 2363 __ inc_counter(counter, tmp, tmp2); 2364 } 2365 #endif // !PRODUCT 2366 2367 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2368 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2369 const char *name; 2370 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2371 2372 Register src_ptr = R0; 2373 Register dst_ptr = R1; 2374 Register len = R2; 2375 2376 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2377 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift); 2378 2379 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2380 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift); 2381 2382 __ mov(len, length); 2383 2384 __ call(entry, relocInfo::runtime_call_type); 2385 2386 __ bind(*stub->continuation()); 2387 } 2388 2389 #ifdef ASSERT 2390 // emit run-time assertion 2391 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2392 assert(op->code() == lir_assert, "must be"); 2393 2394 if (op->in_opr1()->is_valid()) { 2395 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 2396 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 2397 } else { 2398 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 2399 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 2400 } 2401 2402 Label ok; 2403 if (op->condition() != lir_cond_always) { 2404 AsmCondition acond = al; 2405 switch (op->condition()) { 2406 case lir_cond_equal: acond = eq; break; 2407 case lir_cond_notEqual: acond = ne; break; 2408 case lir_cond_less: acond = lt; break; 2409 case lir_cond_lessEqual: acond = le; break; 2410 case lir_cond_greaterEqual: acond = ge; break; 2411 case lir_cond_greater: acond = gt; break; 2412 case lir_cond_aboveEqual: acond = hs; break; 2413 case lir_cond_belowEqual: acond = ls; break; 2414 default: ShouldNotReachHere(); 2415 } 2416 __ b(ok, acond); 2417 } 2418 if (op->halt()) { 2419 const char* str = __ code_string(op->msg()); 2420 __ stop(str); 2421 } else { 2422 breakpoint(); 2423 } 2424 __ bind(ok); 2425 } 2426 #endif // ASSERT 2427 2428 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2429 fatal("CRC32 intrinsic is not implemented on this platform"); 2430 } 2431 2432 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2433 Register obj = op->obj_opr()->as_pointer_register(); 2434 Register hdr = op->hdr_opr()->as_pointer_register(); 2435 Register lock = op->lock_opr()->as_pointer_register(); 2436 2437 if (LockingMode == LM_MONITOR) { 2438 if (op->info() != nullptr) { 2439 add_debug_info_for_null_check_here(op->info()); 2440 __ null_check(obj); 2441 } 2442 __ b(*op->stub()->entry()); 2443 } else if (op->code() == lir_lock) { 2444 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2445 int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2446 if (op->info() != nullptr) { 2447 add_debug_info_for_null_check(null_check_offset, op->info()); 2448 } 2449 } else if (op->code() == lir_unlock) { 2450 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2451 } else { 2452 ShouldNotReachHere(); 2453 } 2454 __ bind(*op->stub()->continuation()); 2455 } 2456 2457 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2458 Register obj = op->obj()->as_pointer_register(); 2459 Register result = op->result_opr()->as_pointer_register(); 2460 2461 CodeEmitInfo* info = op->info(); 2462 if (info != nullptr) { 
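    // record the implicit null check: the klass load below may fault if obj is null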
2463 add_debug_info_for_null_check_here(info); 2464 } 2465 2466 if (UseCompressedClassPointers) { // On 32 bit arm?? 2467 __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2468 } else { 2469 __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2470 } 2471 } 2472 2473 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2474 ciMethod* method = op->profiled_method(); 2475 int bci = op->profiled_bci(); 2476 ciMethod* callee = op->profiled_callee(); 2477 2478 // Update counter for all call types 2479 ciMethodData* md = method->method_data_or_null(); 2480 assert(md != nullptr, "Sanity"); 2481 ciProfileData* data = md->bci_to_data(bci); 2482 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2483 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2484 Register mdo = op->mdo()->as_register(); 2485 assert(op->tmp1()->is_register(), "tmp1 must be allocated"); 2486 Register tmp1 = op->tmp1()->as_pointer_register(); 2487 assert_different_registers(mdo, tmp1); 2488 __ mov_metadata(mdo, md->constant_encoding()); 2489 int mdo_offset_bias = 0; 2490 int max_offset = 4096; 2491 if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) { 2492 // The offset is large so bias the mdo by the base of the slot so 2493 // that the ldr can use an immediate offset to reference the slots of the data 2494 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2495 __ mov_slow(tmp1, mdo_offset_bias); 2496 __ add(mdo, mdo, tmp1); 2497 } 2498 2499 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2500 // Perform additional virtual call profiling for invokevirtual and 2501 // invokeinterface bytecodes 2502 if (op->should_profile_receiver_type()) { 2503 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2504 Register recv = op->recv()->as_register(); 2505 assert_different_registers(mdo, tmp1, recv); 2506 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2507 ciKlass* known_klass = op->known_holder(); 2508 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2509 // We know the type that will be seen at this call site; we can 2510 // statically update the MethodData* rather than needing to do 2511 // dynamic tests on the receiver type 2512 2513 // NOTE: we should probably put a lock around this search to 2514 // avoid collisions by concurrent compilations 2515 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2516 uint i; 2517 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2518 ciKlass* receiver = vc_data->receiver(i); 2519 if (known_klass->equals(receiver)) { 2520 Address data_addr(mdo, md->byte_offset_of_slot(data, 2521 VirtualCallData::receiver_count_offset(i)) - 2522 mdo_offset_bias); 2523 __ ldr(tmp1, data_addr); 2524 __ add(tmp1, tmp1, DataLayout::counter_increment); 2525 __ str(tmp1, data_addr); 2526 return; 2527 } 2528 } 2529 2530 // Receiver type not found in profile data; select an empty slot 2531 2532 // Note that this is less efficient than it should be because it 2533 // always does a write to the receiver part of the 2534 // VirtualCallData rather than just the first time 2535 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2536 ciKlass* receiver = vc_data->receiver(i); 2537 if (receiver == nullptr) { 2538 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 2539 mdo_offset_bias); 2540 __ mov_metadata(tmp1, 
known_klass->constant_encoding()); 2541 __ str(tmp1, recv_addr); 2542 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 2543 mdo_offset_bias); 2544 __ ldr(tmp1, data_addr); 2545 __ add(tmp1, tmp1, DataLayout::counter_increment); 2546 __ str(tmp1, data_addr); 2547 return; 2548 } 2549 } 2550 } else { 2551 __ load_klass(recv, recv); 2552 Label update_done; 2553 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2554 // Receiver did not match any saved receiver and there is no empty row for it. 2555 // Increment total counter to indicate polymorphic case. 2556 __ ldr(tmp1, counter_addr); 2557 __ add(tmp1, tmp1, DataLayout::counter_increment); 2558 __ str(tmp1, counter_addr); 2559 2560 __ bind(update_done); 2561 } 2562 } else { 2563 // Static call 2564 __ ldr(tmp1, counter_addr); 2565 __ add(tmp1, tmp1, DataLayout::counter_increment); 2566 __ str(tmp1, counter_addr); 2567 } 2568 } 2569 2570 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2571 fatal("Type profiling not implemented on this platform"); 2572 } 2573 2574 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 2575 Unimplemented(); 2576 } 2577 2578 2579 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 2580 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2581 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp()); 2582 } 2583 2584 2585 void LIR_Assembler::align_backward_branch_target() { 2586 // Some ARM processors do better with 8-byte branch target alignment 2587 __ align(8); 2588 } 2589 2590 2591 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2592 // tmp must be unused 2593 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2594 2595 if (left->is_single_cpu()) { 2596 assert (dest->type() == T_INT, "unexpected result type"); 2597 assert (left->type() == T_INT, "unexpected left type"); 2598 __ neg_32(dest->as_register(), left->as_register()); 2599 } else if (left->is_double_cpu()) { 2600 Register dest_lo = dest->as_register_lo(); 2601 Register dest_hi = dest->as_register_hi(); 2602 Register src_lo = left->as_register_lo(); 2603 Register src_hi = left->as_register_hi(); 2604 if (dest_lo == src_hi) { 2605 dest_lo = Rtemp; 2606 } 2607 __ rsbs(dest_lo, src_lo, 0); 2608 __ rsc(dest_hi, src_hi, 0); 2609 move_regs(dest_lo, dest->as_register_lo()); 2610 } else if (left->is_single_fpu()) { 2611 __ neg_float(dest->as_float_reg(), left->as_float_reg()); 2612 } else if (left->is_double_fpu()) { 2613 __ neg_double(dest->as_double_reg(), left->as_double_reg()); 2614 } else { 2615 ShouldNotReachHere(); 2616 } 2617 } 2618 2619 2620 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2621 assert(patch_code == lir_patch_none, "Patch code not supported"); 2622 LIR_Address* addr = addr_opr->as_address_ptr(); 2623 if (addr->index()->is_illegal()) { 2624 jint c = addr->disp(); 2625 if (!Assembler::is_arith_imm_in_range(c)) { 2626 BAILOUT("illegal arithmetic operand"); 2627 } 2628 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c); 2629 } else { 2630 assert(addr->disp() == 0, "cannot handle otherwise"); 2631 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), 2632 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale())); 2633 } 2634 } 2635 2636 2637 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 
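  // The argument list has already been set up by the preceding LIR moves;
  // only the call itself (plus optional debug info) is emitted here.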
2638 assert(!tmp->is_valid(), "don't need temporary"); 2639 __ call(dest); 2640 if (info != nullptr) { 2641 add_call_info_here(info); 2642 } 2643 } 2644 2645 2646 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2647 assert((src->is_double_cpu() && dest->is_address()) || 2648 (src->is_address() && dest->is_double_cpu()), 2649 "Simple move_op is called for all other cases"); 2650 2651 int null_check_offset; 2652 if (dest->is_address()) { 2653 // Store 2654 const LIR_Address* addr = dest->as_address_ptr(); 2655 const Register src_lo = src->as_register_lo(); 2656 const Register src_hi = src->as_register_hi(); 2657 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2658 2659 if (src_lo < src_hi) { 2660 null_check_offset = __ offset(); 2661 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi)); 2662 } else { 2663 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2664 __ mov(Rtemp, src_hi); 2665 null_check_offset = __ offset(); 2666 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp)); 2667 } 2668 } else { 2669 // Load 2670 const LIR_Address* addr = src->as_address_ptr(); 2671 const Register dest_lo = dest->as_register_lo(); 2672 const Register dest_hi = dest->as_register_hi(); 2673 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already"); 2674 2675 null_check_offset = __ offset(); 2676 if (dest_lo < dest_hi) { 2677 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi)); 2678 } else { 2679 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register"); 2680 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp)); 2681 __ mov(dest_hi, Rtemp); 2682 } 2683 } 2684 2685 if (info != nullptr) { 2686 add_debug_info_for_null_check(null_check_offset, info); 2687 } 2688 } 2689 2690 2691 void LIR_Assembler::membar() { 2692 __ membar(MacroAssembler::StoreLoad, Rtemp); 2693 } 2694 2695 void LIR_Assembler::membar_acquire() { 2696 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); 2697 } 2698 2699 void LIR_Assembler::membar_release() { 2700 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp); 2701 } 2702 2703 void LIR_Assembler::membar_loadload() { 2704 __ membar(MacroAssembler::LoadLoad, Rtemp); 2705 } 2706 2707 void LIR_Assembler::membar_storestore() { 2708 __ membar(MacroAssembler::StoreStore, Rtemp); 2709 } 2710 2711 void LIR_Assembler::membar_loadstore() { 2712 __ membar(MacroAssembler::LoadStore, Rtemp); 2713 } 2714 2715 void LIR_Assembler::membar_storeload() { 2716 __ membar(MacroAssembler::StoreLoad, Rtemp); 2717 } 2718 2719 void LIR_Assembler::on_spin_wait() { 2720 Unimplemented(); 2721 } 2722 2723 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2724 // Not used on ARM 2725 Unimplemented(); 2726 } 2727 2728 void LIR_Assembler::peephole(LIR_List* lir) { 2729 LIR_OpList* inst = lir->instructions_list(); 2730 const int inst_length = inst->length(); 2731 for (int i = 0; i < inst_length; i++) { 2732 LIR_Op* op = inst->at(i); 2733 switch (op->code()) { 2734 case lir_cmp: { 2735 // Replace: 2736 // cmp rX, y 2737 // cmove [EQ] y, z, rX 2738 // with 2739 // cmp rX, y 2740 // cmove [EQ] illegalOpr, z, rX 2741 // 2742 // or 2743 // cmp rX, y 2744 // cmove [NE] z, y, rX 2745 // with 2746 // cmp rX, y 2747 // cmove [NE] z, illegalOpr, rX 
        //
        // moves from illegalOpr should be removed when converting LIR to native assembly

        LIR_Op2* cmp = op->as_Op2();
        assert(cmp != nullptr, "cmp LIR instruction is not an op2");

        if (i + 1 < inst_length) {
          LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
          if (cmove != nullptr && cmove->code() == lir_cmove) {
            LIR_Opr cmove_res = cmove->result_opr();
            bool res_is_op1 = cmove_res == cmp->in_opr1();
            bool res_is_op2 = cmove_res == cmp->in_opr2();
            LIR_Opr cmp_res, cmp_arg;
            if (res_is_op1) {
              cmp_res = cmp->in_opr1();
              cmp_arg = cmp->in_opr2();
            } else if (res_is_op2) {
              cmp_res = cmp->in_opr2();
              cmp_arg = cmp->in_opr1();
            } else {
              cmp_res = LIR_OprFact::illegalOpr;
              cmp_arg = LIR_OprFact::illegalOpr;
            }

            if (cmp_res != LIR_OprFact::illegalOpr) {
              LIR_Condition cond = cmove->condition();
              if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
                cmove->set_in_opr1(LIR_OprFact::illegalOpr);
              } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
                cmove->set_in_opr2(LIR_OprFact::illegalOpr);
              }
            }
          }
        }
        break;
      }

      default:
        break;
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(src->is_address(), "sanity");
  Address addr = as_Address(src->as_address_ptr());

  if (code != lir_xchg) {
    assert(!data->is_oop(), "xadd for oops");
  }

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

  Label retry;
  __ bind(retry);

  if (data->type() == T_INT || data->is_oop()) {
    Register dst = dest->as_register();
    Register new_val = noreg;
    __ ldrex(dst, addr);
    if (code == lir_xadd) {
      Register tmp_reg = tmp->as_register();
      if (data->is_constant()) {
        assert_different_registers(dst, tmp_reg);
        __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
      } else {
        assert_different_registers(dst, tmp_reg, data->as_register());
        __ add_32(tmp_reg, dst, data->as_register());
      }
      new_val = tmp_reg;
    } else {
      if (UseCompressedOops && data->is_oop()) {
        new_val = tmp->as_pointer_register();
      } else {
        new_val = data->as_register();
      }
      assert_different_registers(dst, new_val);
    }
    __ strex(Rtemp, new_val, addr);

  } else if (data->type() == T_LONG) {
    Register dst_lo = dest->as_register_lo();
    Register new_val_lo = noreg;
    Register dst_hi = dest->as_register_hi();

    assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
    assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");

    __ ldrexd(dst_lo, addr);
    if (code == lir_xadd) {
      Register tmp_lo = tmp->as_register_lo();
      Register tmp_hi = tmp->as_register_hi();

      assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
      assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");

      if (data->is_constant()) {
        jlong c = data->as_constant_ptr()->as_jlong();
        assert((jlong)((jint)c) == c, "overflow");
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
        __ adds(tmp_lo, dst_lo, (jint)c);
        __ adc(tmp_hi, dst_hi, 0);
      } else {
        Register new_val_lo = data->as_register_lo();
        Register new_val_hi
= data->as_register_hi(); 2855 __ adds(tmp_lo, dst_lo, new_val_lo); 2856 __ adc(tmp_hi, dst_hi, new_val_hi); 2857 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi); 2858 } 2859 new_val_lo = tmp_lo; 2860 } else { 2861 new_val_lo = data->as_register_lo(); 2862 Register new_val_hi = data->as_register_hi(); 2863 2864 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi); 2865 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair"); 2866 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair"); 2867 } 2868 __ strexd(Rtemp, new_val_lo, addr); 2869 } else { 2870 ShouldNotReachHere(); 2871 } 2872 2873 __ cbnz_32(Rtemp, retry); 2874 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp); 2875 2876 } 2877 2878 #undef __