1 /* 2 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. 4 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #include "precompiled.hpp" 28 #include "asm/assembler.hpp" 29 #include "asm/macroAssembler.inline.hpp" 30 #include "c1/c1_CodeStubs.hpp" 31 #include "c1/c1_Compilation.hpp" 32 #include "c1/c1_LIRAssembler.hpp" 33 #include "c1/c1_MacroAssembler.hpp" 34 #include "c1/c1_Runtime1.hpp" 35 #include "c1/c1_ValueStack.hpp" 36 #include "ci/ciArrayKlass.hpp" 37 #include "ci/ciInstance.hpp" 38 #include "code/compiledIC.hpp" 39 #include "gc/shared/collectedHeap.hpp" 40 #include "nativeInst_riscv.hpp" 41 #include "oops/objArrayKlass.hpp" 42 #include "runtime/frame.inline.hpp" 43 #include "runtime/sharedRuntime.hpp" 44 #include "utilities/powerOfTwo.hpp" 45 #include "vmreg_riscv.inline.hpp" 46 47 #ifndef PRODUCT 48 #define COMMENT(x) do { __ block_comment(x); } while (0) 49 #else 50 #define COMMENT(x) 51 #endif 52 53 NEEDS_CLEANUP // remove these definitions?
54 const Register IC_Klass = t1; // where the IC klass is cached 55 const Register SYNC_header = x10; // synchronization header 56 const Register SHIFT_count = x10; // where count for shift operations must be 57 58 #define __ _masm-> 59 60 static void select_different_registers(Register preserve, 61 Register extra, 62 Register &tmp1, 63 Register &tmp2) { 64 if (tmp1 == preserve) { 65 assert_different_registers(tmp1, tmp2, extra); 66 tmp1 = extra; 67 } else if (tmp2 == preserve) { 68 assert_different_registers(tmp1, tmp2, extra); 69 tmp2 = extra; 70 } 71 assert_different_registers(preserve, tmp1, tmp2); 72 } 73 74 static void select_different_registers(Register preserve, 75 Register extra, 76 Register &tmp1, 77 Register &tmp2, 78 Register &tmp3) { 79 if (tmp1 == preserve) { 80 assert_different_registers(tmp1, tmp2, tmp3, extra); 81 tmp1 = extra; 82 } else if (tmp2 == preserve) { 83 assert_different_registers(tmp1, tmp2, tmp3, extra); 84 tmp2 = extra; 85 } else if (tmp3 == preserve) { 86 assert_different_registers(tmp1, tmp2, tmp3, extra); 87 tmp3 = extra; 88 } 89 assert_different_registers(preserve, tmp1, tmp2, tmp3); 90 } 91 92 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; } 93 94 void LIR_Assembler::clinit_barrier(ciMethod* method) { 95 assert(VM_Version::supports_fast_class_init_checks(), "sanity"); 96 assert(!method->holder()->is_not_initialized(), "initialization should have been started"); 97 98 Label L_skip_barrier; 99 100 __ mov_metadata(t1, method->holder()->constant_encoding()); 101 __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */); 102 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); 103 __ bind(L_skip_barrier); 104 } 105 106 LIR_Opr LIR_Assembler::receiverOpr() { 107 return FrameMap::receiver_opr; 108 } 109 110 LIR_Opr LIR_Assembler::osrBufferPointer() { 111 return FrameMap::as_pointer_opr(receiverOpr()->as_register()); 112 } 113 114 void LIR_Assembler::breakpoint() { Unimplemented(); } 115 116 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); } 117 118 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); } 119 120 static jlong as_long(LIR_Opr data) { 121 jlong result; 122 switch (data->type()) { 123 case T_INT: 124 result = (data->as_jint()); 125 break; 126 case T_LONG: 127 result = (data->as_jlong()); 128 break; 129 default: 130 ShouldNotReachHere(); 131 result = 0; // unreachable 132 } 133 return result; 134 } 135 136 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { 137 if (addr->base()->is_illegal()) { 138 assert(addr->index()->is_illegal(), "must be illegal too"); 139 __ movptr(tmp, addr->disp()); 140 return Address(tmp, 0); 141 } 142 143 Register base = addr->base()->as_pointer_register(); 144 LIR_Opr index_opr = addr->index(); 145 146 if (index_opr->is_illegal()) { 147 return Address(base, addr->disp()); 148 } 149 150 int scale = addr->scale(); 151 if (index_opr->is_cpu_register()) { 152 Register index; 153 if (index_opr->is_single_cpu()) { 154 index = index_opr->as_register(); 155 } else { 156 index = index_opr->as_register_lo(); 157 } 158 if (scale != 0) { 159 __ shadd(tmp, index, base, tmp, scale); 160 } else { 161 __ add(tmp, base, index); 162 } 163 return Address(tmp, addr->disp()); 164 } else if (index_opr->is_constant()) { 165 intptr_t addr_offset = (((intptr_t)index_opr->as_constant_ptr()->as_jint()) << scale) + addr->disp(); 166 return Address(base, addr_offset); 167 } 168 169 Unimplemented(); 170 return Address(); 171 } 172 173 Address 
LIR_Assembler::as_Address_hi(LIR_Address* addr) { 174 ShouldNotReachHere(); 175 return Address(); 176 } 177 178 Address LIR_Assembler::as_Address(LIR_Address* addr) { 179 return as_Address(addr, t0); 180 } 181 182 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 183 return as_Address(addr); 184 } 185 186 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is 187 // not encodable as a base + (immediate) offset, generate an explicit address 188 // calculation to hold the address in t0. 189 Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) { 190 precond(size == 4 || size == 8); 191 Address addr = frame_map()->address_for_slot(index, adjust); 192 precond(addr.getMode() == Address::base_plus_offset); 193 precond(addr.base() == sp); 194 precond(addr.offset() > 0); 195 uint mask = size - 1; 196 assert((addr.offset() & mask) == 0, "scaled offsets only"); 197 198 return addr; 199 } 200 201 void LIR_Assembler::osr_entry() { 202 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 203 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 204 guarantee(osr_entry != NULL, "NULL osr_entry!"); 205 ValueStack* entry_state = osr_entry->state(); 206 int number_of_locks = entry_state->locks_size(); 207 208 // we jump here if osr happens with the interpreter 209 // state set up to continue at the beginning of the 210 // loop that triggered osr - in particular, we have 211 // the following registers set up: 212 // 213 // x12: osr buffer 214 // 215 216 // build frame 217 ciMethod* m = compilation()->method(); 218 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 219 220 // OSR buffer is 221 // 222 // locals[nlocals-1..0] 223 // monitors[0..number_of_locks] 224 // 225 // locals is a direct copy of the interpreter frame, so in the osr buffer 226 // the first slot in the local array is the last local from the interpreter 227 // and the last slot is local[0] (receiver) from the interpreter 228 // 229 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 230 // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock 231 // in the interpreter frame (the method lock if a sync method) 232 233 // Initialize monitors in the compiled activation. 234 // x12: pointer to osr buffer 235 // All other registers are dead at this point and the locals will be 236 // copied into place by code emitted in the IR. 237 238 Register OSR_buf = osrBufferPointer()->as_pointer_register(); 239 { 240 assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 241 int monitor_offset = BytesPerWord * method()->max_locals() + 242 (2 * BytesPerWord) * (number_of_locks - 1); 243 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 244 // the OSR buffer using 2 word entries: first the lock and then 245 // the oop.
246 for (int i = 0; i < number_of_locks; i++) { 247 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 248 #ifdef ASSERT 249 // verify the interpreter's monitor has a non-null object 250 { 251 Label L; 252 __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord)); 253 __ bnez(t0, L); 254 __ stop("locked object is NULL"); 255 __ bind(L); 256 } 257 #endif // ASSERT 258 __ ld(x9, Address(OSR_buf, slot_offset + 0)); 259 __ sd(x9, frame_map()->address_for_monitor_lock(i)); 260 __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord)); 261 __ sd(x9, frame_map()->address_for_monitor_object(i)); 262 } 263 } 264 } 265 266 // inline cache check; done before the frame is built. 267 int LIR_Assembler::check_icache() { 268 Register receiver = FrameMap::receiver_opr->as_register(); 269 Register ic_klass = IC_Klass; 270 int start_offset = __ offset(); 271 Label dont; 272 __ inline_cache_check(receiver, ic_klass, dont); 273 274 // if icache check fails, then jump to runtime routine 275 // Note: RECEIVER must still contain the receiver! 276 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 277 278 // We align the verified entry point unless the method body 279 // (including its inline cache check) will fit in a single 64-byte 280 // icache line. 281 if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) { 282 // force alignment after the cache check. 283 __ align(CodeEntryAlignment); 284 } 285 286 __ bind(dont); 287 return start_offset; 288 } 289 290 void LIR_Assembler::jobject2reg(jobject o, Register reg) { 291 if (o == NULL) { 292 __ mv(reg, zr); 293 } else { 294 __ movoop(reg, o, /* immediate */ true); 295 } 296 } 297 298 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { 299 deoptimize_trap(info); 300 } 301 302 // This specifies the rsp decrement needed to build the frame 303 int LIR_Assembler::initial_frame_size_in_bytes() const { 304 // if rounding, must let FrameMap know! 305 306 return in_bytes(frame_map()->framesize_in_bytes()); 307 } 308 309 int LIR_Assembler::emit_exception_handler() { 310 // if the last instruction is a call (typically to do a throw which 311 // is coming at the end after block reordering) the return address 312 // must still point into the code area in order to avoid assertion 313 // failures when searching for the corresponding bci ==> add a nop 314 // (was bug 5/14/1999 -gri) 315 __ nop(); 316 317 // generate code for exception handler 318 address handler_base = __ start_a_stub(exception_handler_size()); 319 if (handler_base == NULL) { 320 // not enough space left for the handler 321 bailout("exception handler overflow"); 322 return -1; 323 } 324 325 int offset = code_offset(); 326 327 // the exception oop and pc are in x10, and x13 328 // no other registers need to be preserved, so invalidate them 329 __ invalidate_registers(false, true, true, false, true, true); 330 331 // check that there is really an exception 332 __ verify_not_null_oop(x10); 333 334 // search an exception handler (x10: exception oop, x13: throwing pc) 335 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); 336 __ should_not_reach_here(); 337 guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); 338 __ end_a_stub(); 339 340 return offset; 341 } 342 343 // Emit the code to remove the frame from the stack in the exception 344 // unwind path. 
345 int LIR_Assembler::emit_unwind_handler() { 346 #ifndef PRODUCT 347 if (CommentedAssembly) { 348 _masm->block_comment("Unwind handler"); 349 } 350 #endif // PRODUCT 351 352 int offset = code_offset(); 353 354 // Fetch the exception from TLS and clear out exception related thread state 355 __ ld(x10, Address(xthread, JavaThread::exception_oop_offset())); 356 __ sd(zr, Address(xthread, JavaThread::exception_oop_offset())); 357 __ sd(zr, Address(xthread, JavaThread::exception_pc_offset())); 358 359 __ bind(_unwind_handler_entry); 360 __ verify_not_null_oop(x10); 361 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 362 __ mv(x9, x10); // Preserve the exception 363 } 364 365 // Perform needed unlocking 366 MonitorExitStub* stub = NULL; 367 if (method()->is_synchronized()) { 368 monitor_address(0, FrameMap::r10_opr); 369 stub = new MonitorExitStub(FrameMap::r10_opr, true, 0); 370 if (UseHeavyMonitors) { 371 __ j(*stub->entry()); 372 } else { 373 __ unlock_object(x15, x14, x10, *stub->entry()); 374 } 375 __ bind(*stub->continuation()); 376 } 377 378 if (compilation()->env()->dtrace_method_probes()) { 379 __ mv(c_rarg0, xthread); 380 __ mov_metadata(c_rarg1, method()->constant_encoding()); 381 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1); 382 } 383 384 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 385 __ mv(x10, x9); // Restore the exception 386 } 387 388 // remove the activation and dispatch to the unwind handler 389 __ block_comment("remove_frame and dispatch to the unwind handler"); 390 __ remove_frame(initial_frame_size_in_bytes()); 391 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); 392 393 // Emit the slow path assembly 394 if (stub != NULL) { 395 stub->emit_code(this); 396 } 397 398 return offset; 399 } 400 401 int LIR_Assembler::emit_deopt_handler() { 402 // if the last instruction is a call (typically to do a throw which 403 // is coming at the end after block reordering) the return address 404 // must still point into the code area in order to avoid assertion 405 // failures when searching for the corresponding bci => add a nop 406 // (was bug 5/14/1999 - gri) 407 __ nop(); 408 409 // generate code for deopt handler 410 address handler_base = __ start_a_stub(deopt_handler_size()); 411 if (handler_base == NULL) { 412 // not enough space left for the handler 413 bailout("deopt handler overflow"); 414 return -1; 415 } 416 417 int offset = code_offset(); 418 419 __ auipc(ra, 0); 420 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); 421 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 422 __ end_a_stub(); 423 424 return offset; 425 } 426 427 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { 428 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == x10, "word returns are in x10"); 429 430 // Pop the stack before the safepoint code 431 __ remove_frame(initial_frame_size_in_bytes()); 432 433 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 434 __ reserved_stack_check(); 435 } 436 437 code_stub->set_safepoint_offset(__ offset()); 438 __ relocate(relocInfo::poll_return_type); 439 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */); 440 __ ret(); 441 } 442 443 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 444 guarantee(info != NULL,
"Shouldn't be NULL"); 445 __ get_polling_page(t0, relocInfo::poll_type); 446 add_debug_info_for_branch(info); // This isn't just debug info: 447 // it's the oop map 448 __ read_polling_page(t0, 0, relocInfo::poll_type); 449 return __ offset(); 450 } 451 452 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 453 __ mv(to_reg, from_reg); 454 } 455 456 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); } 457 458 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 459 assert(src->is_constant(), "should not call otherwise"); 460 assert(dest->is_register(), "should not call otherwise"); 461 LIR_Const* c = src->as_constant_ptr(); 462 address const_addr = NULL; 463 464 switch (c->type()) { 465 case T_INT: 466 assert(patch_code == lir_patch_none, "no patching handled here"); 467 __ mvw(dest->as_register(), c->as_jint()); 468 break; 469 470 case T_ADDRESS: 471 assert(patch_code == lir_patch_none, "no patching handled here"); 472 __ mv(dest->as_register(), c->as_jint()); 473 break; 474 475 case T_LONG: 476 assert(patch_code == lir_patch_none, "no patching handled here"); 477 __ mv(dest->as_register_lo(), (intptr_t)c->as_jlong()); 478 break; 479 480 case T_OBJECT: 481 case T_ARRAY: 482 if (patch_code == lir_patch_none) { 483 jobject2reg(c->as_jobject(), dest->as_register()); 484 } else { 485 jobject2reg_with_patching(dest->as_register(), info); 486 } 487 break; 488 489 case T_METADATA: 490 if (patch_code != lir_patch_none) { 491 klass2reg_with_patching(dest->as_register(), info); 492 } else { 493 __ mov_metadata(dest->as_register(), c->as_metadata()); 494 } 495 break; 496 497 case T_FLOAT: 498 const_addr = float_constant(c->as_jfloat()); 499 assert(const_addr != NULL, "must create float constant in the constant table"); 500 __ flw(dest->as_float_reg(), InternalAddress(const_addr)); 501 break; 502 503 case T_DOUBLE: 504 const_addr = double_constant(c->as_jdouble()); 505 assert(const_addr != NULL, "must create double constant in the constant table"); 506 __ fld(dest->as_double_reg(), InternalAddress(const_addr)); 507 break; 508 509 default: 510 ShouldNotReachHere(); 511 } 512 } 513 514 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 515 assert(src->is_constant(), "should not call otherwise"); 516 assert(dest->is_stack(), "should not call otherwise"); 517 LIR_Const* c = src->as_constant_ptr(); 518 switch (c->type()) { 519 case T_OBJECT: 520 if (c->as_jobject() == NULL) { 521 __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 522 } else { 523 const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL); 524 reg2stack(FrameMap::t1_opr, dest, c->type(), false); 525 } 526 break; 527 case T_ADDRESS: // fall through 528 const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL); 529 reg2stack(FrameMap::t1_opr, dest, c->type(), false); 530 case T_INT: // fall through 531 case T_FLOAT: 532 if (c->as_jint_bits() == 0) { 533 __ sw(zr, frame_map()->address_for_slot(dest->single_stack_ix())); 534 } else { 535 __ mvw(t1, c->as_jint_bits()); 536 __ sw(t1, frame_map()->address_for_slot(dest->single_stack_ix())); 537 } 538 break; 539 case T_LONG: // fall through 540 case T_DOUBLE: 541 if (c->as_jlong_bits() == 0) { 542 __ sd(zr, frame_map()->address_for_slot(dest->double_stack_ix(), 543 lo_word_offset_in_bytes)); 544 } else { 545 __ mv(t1, (intptr_t)c->as_jlong_bits()); 546 __ sd(t1, frame_map()->address_for_slot(dest->double_stack_ix(), 547 lo_word_offset_in_bytes)); 548 } 549 break; 550 default: 551 
ShouldNotReachHere(); 552 } 553 } 554 555 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 556 assert(src->is_constant(), "should not call otherwise"); 557 assert(dest->is_address(), "should not call otherwise"); 558 LIR_Const* c = src->as_constant_ptr(); 559 LIR_Address* to_addr = dest->as_address_ptr(); 560 void (Assembler::* insn)(Register Rt, const Address &adr, Register temp); 561 switch (type) { 562 case T_ADDRESS: 563 assert(c->as_jint() == 0, "should be"); 564 insn = &Assembler::sd; break; 565 case T_LONG: 566 assert(c->as_jlong() == 0, "should be"); 567 insn = &Assembler::sd; break; 568 case T_DOUBLE: 569 assert(c->as_jdouble() == 0.0, "should be"); 570 insn = &Assembler::sd; break; 571 case T_INT: 572 assert(c->as_jint() == 0, "should be"); 573 insn = &Assembler::sw; break; 574 case T_FLOAT: 575 assert(c->as_jfloat() == 0.0f, "should be"); 576 insn = &Assembler::sw; break; 577 case T_OBJECT: // fall through 578 case T_ARRAY: 579 assert(c->as_jobject() == 0, "should be"); 580 if (UseCompressedOops && !wide) { 581 insn = &Assembler::sw; 582 } else { 583 insn = &Assembler::sd; 584 } 585 break; 586 case T_CHAR: // fall through 587 case T_SHORT: 588 assert(c->as_jint() == 0, "should be"); 589 insn = &Assembler::sh; 590 break; 591 case T_BOOLEAN: // fall through 592 case T_BYTE: 593 assert(c->as_jint() == 0, "should be"); 594 insn = &Assembler::sb; break; 595 default: 596 ShouldNotReachHere(); 597 insn = &Assembler::sd; // unreachable 598 } 599 if (info != NULL) { 600 add_debug_info_for_null_check_here(info); 601 } 602 (_masm->*insn)(zr, as_Address(to_addr), t0); 603 } 604 605 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 606 assert(src->is_register(), "should not call otherwise"); 607 assert(dest->is_register(), "should not call otherwise"); 608 609 // move between cpu-registers 610 if (dest->is_single_cpu()) { 611 if (src->type() == T_LONG) { 612 // Can do LONG -> OBJECT 613 move_regs(src->as_register_lo(), dest->as_register()); 614 return; 615 } 616 assert(src->is_single_cpu(), "must match"); 617 if (src->type() == T_OBJECT) { 618 __ verify_oop(src->as_register()); 619 } 620 move_regs(src->as_register(), dest->as_register()); 621 } else if (dest->is_double_cpu()) { 622 if (is_reference_type(src->type())) { 623 __ verify_oop(src->as_register()); 624 move_regs(src->as_register(), dest->as_register_lo()); 625 return; 626 } 627 assert(src->is_double_cpu(), "must match"); 628 Register f_lo = src->as_register_lo(); 629 Register f_hi = src->as_register_hi(); 630 Register t_lo = dest->as_register_lo(); 631 Register t_hi = dest->as_register_hi(); 632 assert(f_hi == f_lo, "must be same"); 633 assert(t_hi == t_lo, "must be same"); 634 move_regs(f_lo, t_lo); 635 } else if (dest->is_single_fpu()) { 636 assert(src->is_single_fpu(), "expect single fpu"); 637 __ fmv_s(dest->as_float_reg(), src->as_float_reg()); 638 } else if (dest->is_double_fpu()) { 639 assert(src->is_double_fpu(), "expect double fpu"); 640 __ fmv_d(dest->as_double_reg(), src->as_double_reg()); 641 } else { 642 ShouldNotReachHere(); 643 } 644 } 645 646 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 647 precond(src->is_register() && dest->is_stack()); 648 649 uint const c_sz32 = sizeof(uint32_t); 650 uint const c_sz64 = sizeof(uint64_t); 651 652 assert(src->is_register(), "should not call otherwise"); 653 assert(dest->is_stack(), "should not call otherwise"); 654 if (src->is_single_cpu()) { 655 int index = 
dest->single_stack_ix(); 656 if (is_reference_type(type)) { 657 __ sd(src->as_register(), stack_slot_address(index, c_sz64)); 658 __ verify_oop(src->as_register()); 659 } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) { 660 __ sd(src->as_register(), stack_slot_address(index, c_sz64)); 661 } else { 662 __ sw(src->as_register(), stack_slot_address(index, c_sz32)); 663 } 664 } else if (src->is_double_cpu()) { 665 int index = dest->double_stack_ix(); 666 Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); 667 __ sd(src->as_register_lo(), dest_addr_LO); 668 } else if (src->is_single_fpu()) { 669 int index = dest->single_stack_ix(); 670 __ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32)); 671 } else if (src->is_double_fpu()) { 672 int index = dest->double_stack_ix(); 673 __ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64)); 674 } else { 675 ShouldNotReachHere(); 676 } 677 } 678 679 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) { 680 LIR_Address* to_addr = dest->as_address_ptr(); 681 // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src 682 Register compressed_src = t1; 683 684 if (patch_code != lir_patch_none) { 685 deoptimize_trap(info); 686 return; 687 } 688 689 if (is_reference_type(type)) { 690 __ verify_oop(src->as_register()); 691 692 if (UseCompressedOops && !wide) { 693 __ encode_heap_oop(compressed_src, src->as_register()); 694 } else { 695 compressed_src = src->as_register(); 696 } 697 } 698 699 int null_check_here = code_offset(); 700 701 switch (type) { 702 case T_FLOAT: 703 __ fsw(src->as_float_reg(), as_Address(to_addr)); 704 break; 705 706 case T_DOUBLE: 707 __ fsd(src->as_double_reg(), as_Address(to_addr)); 708 break; 709 710 case T_ARRAY: // fall through 711 case T_OBJECT: 712 if (UseCompressedOops && !wide) { 713 __ sw(compressed_src, as_Address(to_addr)); 714 } else { 715 __ sd(compressed_src, as_Address(to_addr)); 716 } 717 break; 718 case T_METADATA: 719 // We get here to store a method pointer to the stack to pass to 720 // a dtrace runtime call. This can't work on 64 bit with 721 // compressed klass ptrs: T_METADATA can be compressed klass 722 // ptr or a 64 bit method pointer. 
723 ShouldNotReachHere(); 724 __ sd(src->as_register(), as_Address(to_addr)); 725 break; 726 case T_ADDRESS: 727 __ sd(src->as_register(), as_Address(to_addr)); 728 break; 729 case T_INT: 730 __ sw(src->as_register(), as_Address(to_addr)); 731 break; 732 case T_LONG: 733 __ sd(src->as_register_lo(), as_Address(to_addr)); 734 break; 735 case T_BYTE: // fall through 736 case T_BOOLEAN: 737 __ sb(src->as_register(), as_Address(to_addr)); 738 break; 739 case T_CHAR: // fall through 740 case T_SHORT: 741 __ sh(src->as_register(), as_Address(to_addr)); 742 break; 743 default: 744 ShouldNotReachHere(); 745 } 746 747 if (info != NULL) { 748 add_debug_info_for_null_check(null_check_here, info); 749 } 750 } 751 752 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 753 precond(src->is_stack() && dest->is_register()); 754 755 uint const c_sz32 = sizeof(uint32_t); 756 uint const c_sz64 = sizeof(uint64_t); 757 758 if (dest->is_single_cpu()) { 759 int index = src->single_stack_ix(); 760 if (type == T_INT) { 761 __ lw(dest->as_register(), stack_slot_address(index, c_sz32)); 762 } else if (is_reference_type(type)) { 763 __ ld(dest->as_register(), stack_slot_address(index, c_sz64)); 764 __ verify_oop(dest->as_register()); 765 } else if (type == T_METADATA || type == T_ADDRESS) { 766 __ ld(dest->as_register(), stack_slot_address(index, c_sz64)); 767 } else { 768 __ lwu(dest->as_register(), stack_slot_address(index, c_sz32)); 769 } 770 } else if (dest->is_double_cpu()) { 771 int index = src->double_stack_ix(); 772 Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); 773 __ ld(dest->as_register_lo(), src_addr_LO); 774 } else if (dest->is_single_fpu()) { 775 int index = src->single_stack_ix(); 776 __ flw(dest->as_float_reg(), stack_slot_address(index, c_sz32)); 777 } else if (dest->is_double_fpu()) { 778 int index = src->double_stack_ix(); 779 __ fld(dest->as_double_reg(), stack_slot_address(index, c_sz64)); 780 } else { 781 ShouldNotReachHere(); 782 } 783 } 784 785 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { 786 deoptimize_trap(info); 787 } 788 789 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 790 LIR_Opr temp; 791 if (type == T_LONG || type == T_DOUBLE) { 792 temp = FrameMap::t1_long_opr; 793 } else { 794 temp = FrameMap::t1_opr; 795 } 796 797 stack2reg(src, temp, src->type()); 798 reg2stack(temp, dest, dest->type(), false); 799 } 800 801 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) { 802 assert(src->is_address(), "should not call otherwise"); 803 assert(dest->is_register(), "should not call otherwise"); 804 805 LIR_Address* addr = src->as_address_ptr(); 806 LIR_Address* from_addr = src->as_address_ptr(); 807 808 if (addr->base()->type() == T_OBJECT) { 809 __ verify_oop(addr->base()->as_pointer_register()); 810 } 811 812 if (patch_code != lir_patch_none) { 813 deoptimize_trap(info); 814 return; 815 } 816 817 if (info != NULL) { 818 add_debug_info_for_null_check_here(info); 819 } 820 821 int null_check_here = code_offset(); 822 switch (type) { 823 case T_FLOAT: 824 __ flw(dest->as_float_reg(), as_Address(from_addr)); 825 break; 826 case T_DOUBLE: 827 __ fld(dest->as_double_reg(), as_Address(from_addr)); 828 break; 829 case T_ARRAY: // fall through 830 case T_OBJECT: 831 if (UseCompressedOops && !wide) { 832 __ lwu(dest->as_register(), as_Address(from_addr)); 833 } else { 834 __ ld(dest->as_register(), 
as_Address(from_addr)); 835 } 836 break; 837 case T_METADATA: 838 // We get here to store a method pointer to the stack to pass to 839 // a dtrace runtime call. This can't work on 64 bit with 840 // compressed klass ptrs: T_METADATA can be a compressed klass 841 // ptr or a 64 bit method pointer. 842 ShouldNotReachHere(); 843 __ ld(dest->as_register(), as_Address(from_addr)); 844 break; 845 case T_ADDRESS: 846 __ ld(dest->as_register(), as_Address(from_addr)); 847 break; 848 case T_INT: 849 __ lw(dest->as_register(), as_Address(from_addr)); 850 break; 851 case T_LONG: 852 __ ld(dest->as_register_lo(), as_Address_lo(from_addr)); 853 break; 854 case T_BYTE: 855 __ lb(dest->as_register(), as_Address(from_addr)); 856 break; 857 case T_BOOLEAN: 858 __ lbu(dest->as_register(), as_Address(from_addr)); 859 break; 860 case T_CHAR: 861 __ lhu(dest->as_register(), as_Address(from_addr)); 862 break; 863 case T_SHORT: 864 __ lh(dest->as_register(), as_Address(from_addr)); 865 break; 866 default: 867 ShouldNotReachHere(); 868 } 869 870 if (is_reference_type(type)) { 871 if (UseCompressedOops && !wide) { 872 __ decode_heap_oop(dest->as_register()); 873 } 874 875 if (!UseZGC) { 876 // Load barrier has not yet been applied, so ZGC can't verify the oop here 877 __ verify_oop(dest->as_register()); 878 } 879 } 880 } 881 882 void LIR_Assembler::emit_op3(LIR_Op3* op) { 883 switch (op->code()) { 884 case lir_idiv: // fall through 885 case lir_irem: 886 arithmetic_idiv(op->code(), 887 op->in_opr1(), 888 op->in_opr2(), 889 op->in_opr3(), 890 op->result_opr(), 891 op->info()); 892 break; 893 case lir_fmad: 894 __ fmadd_d(op->result_opr()->as_double_reg(), 895 op->in_opr1()->as_double_reg(), 896 op->in_opr2()->as_double_reg(), 897 op->in_opr3()->as_double_reg()); 898 break; 899 case lir_fmaf: 900 __ fmadd_s(op->result_opr()->as_float_reg(), 901 op->in_opr1()->as_float_reg(), 902 op->in_opr2()->as_float_reg(), 903 op->in_opr3()->as_float_reg()); 904 break; 905 default: 906 ShouldNotReachHere(); 907 } 908 } 909 910 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 911 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 912 Label label; 913 914 emit_branch(condition, cmp_opr1, cmp_opr2, label, /* is_far */ false, 915 /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? 
false : true); 916 917 Label done; 918 move_op(opr2, result, type, lir_patch_none, NULL, 919 false, // pop_fpu_stack 920 false); // wide 921 __ j(done); 922 __ bind(label); 923 move_op(opr1, result, type, lir_patch_none, NULL, 924 false, // pop_fpu_stack 925 false); // wide 926 __ bind(done); 927 } 928 929 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 930 LIR_Condition condition = op->cond(); 931 if (condition == lir_cond_always) { 932 if (op->info() != NULL) { 933 add_debug_info_for_branch(op->info()); 934 } 935 } else { 936 assert(op->in_opr1() != LIR_OprFact::illegalOpr && op->in_opr2() != LIR_OprFact::illegalOpr, "conditional branches must have legal operands"); 937 } 938 bool is_unordered = (op->ublock() == op->block()); 939 emit_branch(condition, op->in_opr1(), op->in_opr2(), *op->label(), /* is_far */ true, is_unordered); 940 } 941 942 void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label, 943 bool is_far, bool is_unordered) { 944 945 if (cmp_flag == lir_cond_always) { 946 __ j(label); 947 return; 948 } 949 950 if (cmp1->is_cpu_register()) { 951 Register reg1 = as_reg(cmp1); 952 if (cmp2->is_cpu_register()) { 953 Register reg2 = as_reg(cmp2); 954 __ c1_cmp_branch(cmp_flag, reg1, reg2, label, cmp1->type(), is_far); 955 } else if (cmp2->is_constant()) { 956 const2reg_helper(cmp2); 957 __ c1_cmp_branch(cmp_flag, reg1, t0, label, cmp2->type(), is_far); 958 } else { 959 ShouldNotReachHere(); 960 } 961 } else if (cmp1->is_single_fpu()) { 962 assert(cmp2->is_single_fpu(), "expect single float register"); 963 __ c1_float_cmp_branch(cmp_flag, cmp1->as_float_reg(), cmp2->as_float_reg(), label, is_far, is_unordered); 964 } else if (cmp1->is_double_fpu()) { 965 assert(cmp2->is_double_fpu(), "expect double float register"); 966 __ c1_float_cmp_branch(cmp_flag | C1_MacroAssembler::c1_double_branch_mask, 967 cmp1->as_double_reg(), cmp2->as_double_reg(), label, is_far, is_unordered); 968 } else { 969 ShouldNotReachHere(); 970 } 971 } 972 973 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 974 LIR_Opr src = op->in_opr(); 975 LIR_Opr dest = op->result_opr(); 976 977 switch (op->bytecode()) { 978 case Bytecodes::_i2f: 979 __ fcvt_s_w(dest->as_float_reg(), src->as_register()); break; 980 case Bytecodes::_i2d: 981 __ fcvt_d_w(dest->as_double_reg(), src->as_register()); break; 982 case Bytecodes::_l2d: 983 __ fcvt_d_l(dest->as_double_reg(), src->as_register_lo()); break; 984 case Bytecodes::_l2f: 985 __ fcvt_s_l(dest->as_float_reg(), src->as_register_lo()); break; 986 case Bytecodes::_f2d: 987 __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); break; 988 case Bytecodes::_d2f: 989 __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); break; 990 case Bytecodes::_i2c: 991 __ zero_extend(dest->as_register(), src->as_register(), 16); break; 992 case Bytecodes::_i2l: 993 __ addw(dest->as_register_lo(), src->as_register(), zr); break; 994 case Bytecodes::_i2s: 995 __ sign_extend(dest->as_register(), src->as_register(), 16); break; 996 case Bytecodes::_i2b: 997 __ sign_extend(dest->as_register(), src->as_register(), 8); break; 998 case Bytecodes::_l2i: 999 _masm->block_comment("FIXME: This could be a no-op"); 1000 __ addw(dest->as_register(), src->as_register_lo(), zr); break; 1001 case Bytecodes::_d2l: 1002 __ fcvt_l_d_safe(dest->as_register_lo(), src->as_double_reg()); break; 1003 case Bytecodes::_f2i: 1004 __ fcvt_w_s_safe(dest->as_register(), src->as_float_reg()); break; 1005 case Bytecodes::_f2l: 1006 __ fcvt_l_s_safe(dest->as_register_lo(),
src->as_float_reg()); break; 1007 case Bytecodes::_d2i: 1008 __ fcvt_w_d_safe(dest->as_register(), src->as_double_reg()); break; 1009 default: 1010 ShouldNotReachHere(); 1011 } 1012 } 1013 1014 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 1015 if (op->init_check()) { 1016 __ lbu(t0, Address(op->klass()->as_register(), 1017 InstanceKlass::init_state_offset())); 1018 __ mvw(t1, InstanceKlass::fully_initialized); 1019 add_debug_info_for_null_check_here(op->stub()->info()); 1020 __ bne(t0, t1, *op->stub()->entry(), /* is_far */ true); 1021 } 1022 1023 __ allocate_object(op->obj()->as_register(), 1024 op->tmp1()->as_register(), 1025 op->tmp2()->as_register(), 1026 op->header_size(), 1027 op->object_size(), 1028 op->klass()->as_register(), 1029 *op->stub()->entry()); 1030 1031 __ bind(*op->stub()->continuation()); 1032 } 1033 1034 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 1035 Register len = op->len()->as_register(); 1036 1037 if (UseSlowPath || 1038 (!UseFastNewObjectArray && is_reference_type(op->type())) || 1039 (!UseFastNewTypeArray && !is_reference_type(op->type()))) { 1040 __ j(*op->stub()->entry()); 1041 } else { 1042 Register tmp1 = op->tmp1()->as_register(); 1043 Register tmp2 = op->tmp2()->as_register(); 1044 Register tmp3 = op->tmp3()->as_register(); 1045 if (len == tmp1) { 1046 tmp1 = tmp3; 1047 } else if (len == tmp2) { 1048 tmp2 = tmp3; 1049 } else if (len == tmp3) { 1050 // everything is ok 1051 } else { 1052 __ mv(tmp3, len); 1053 } 1054 __ allocate_array(op->obj()->as_register(), 1055 len, 1056 tmp1, 1057 tmp2, 1058 arrayOopDesc::header_size(op->type()), 1059 array_element_size(op->type()), 1060 op->klass()->as_register(), 1061 *op->stub()->entry()); 1062 } 1063 __ bind(*op->stub()->continuation()); 1064 } 1065 1066 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, 1067 Register recv, Label* update_done) { 1068 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1069 Label next_test; 1070 // See if the receiver is receiver[n]. 
1071 __ ld(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); 1072 __ bne(recv, t1, next_test); 1073 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 1074 __ add_memory_int64(data_addr, DataLayout::counter_increment); 1075 __ j(*update_done); 1076 __ bind(next_test); 1077 } 1078 1079 // Didn't find receiver; find next empty slot and fill it in 1080 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1081 Label next_test; 1082 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 1083 __ ld(t1, recv_addr); 1084 __ bnez(t1, next_test); 1085 __ sd(recv, recv_addr); 1086 __ li(t1, DataLayout::counter_increment); 1087 __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)))); 1088 __ j(*update_done); 1089 __ bind(next_test); 1090 } 1091 } 1092 1093 void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) { 1094 ciMethod* method = op->profiled_method(); 1095 assert(method != NULL, "Should have method"); 1096 int bci = op->profiled_bci(); 1097 *md = method->method_data_or_null(); 1098 guarantee(*md != NULL, "Sanity"); 1099 *data = ((*md)->bci_to_data(bci)); 1100 assert(*data != NULL, "need data for type check"); 1101 assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1102 } 1103 1104 void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1, 1105 Register k_RInfo, Register klass_RInfo, 1106 Label *failure_target, Label *success_target) { 1107 // get object class 1108 // not a safepoint as obj null check happens earlier 1109 __ load_klass(klass_RInfo, obj); 1110 if (k->is_loaded()) { 1111 // See if we get an immediate positive hit 1112 __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset()))); 1113 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1114 __ bne(k_RInfo, t0, *failure_target, /* is_far */ true); 1115 // successful cast, fall through to profile or jump 1116 } else { 1117 // See if we get an immediate positive hit 1118 __ beq(k_RInfo, t0, *success_target); 1119 // check for self 1120 __ beq(klass_RInfo, k_RInfo, *success_target); 1121 1122 __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo 1123 __ sd(k_RInfo, Address(sp, 0)); // sub klass 1124 __ sd(klass_RInfo, Address(sp, wordSize)); // super klass 1125 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1126 // load result to k_RInfo 1127 __ ld(k_RInfo, Address(sp, 0)); 1128 __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo 1129 // result is a boolean 1130 __ beqz(k_RInfo, *failure_target, /* is_far */ true); 1131 // successful cast, fall through to profile or jump 1132 } 1133 } else { 1134 // perform the fast part of the checking logic 1135 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1136 // call out-of-line instance of __ check_klass_subtype_slow_path(...)
1137 __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo 1138 __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass 1139 __ sd(k_RInfo, Address(sp, 0)); // super klass 1140 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1141 // load result to k_RInfo 1142 __ ld(k_RInfo, Address(sp, 0)); 1143 __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo 1144 // result is a boolean 1145 __ beqz(k_RInfo, *failure_target, /* is_far */ true); 1146 // successful cast, fall through to profile or jump 1147 } 1148 } 1149 1150 void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Register obj, 1151 Register klass_RInfo, Label* obj_is_null) { 1152 Label not_null; 1153 __ bnez(obj, not_null); 1154 // Object is null, update MDO and exit 1155 Register mdo = klass_RInfo; 1156 __ mov_metadata(mdo, md->constant_encoding()); 1157 Address data_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1158 __ lbu(t0, data_addr); 1159 __ ori(t0, t0, BitData::null_seen_byte_constant()); 1160 __ sb(t0, data_addr); 1161 __ j(*obj_is_null); 1162 __ bind(not_null); 1163 } 1164 1165 void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) { 1166 if (!k->is_loaded()) { 1167 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1168 } else { 1169 __ mov_metadata(k_RInfo, k->constant_encoding()); 1170 } 1171 } 1172 1173 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 1174 Register obj = op->object()->as_register(); 1175 Register k_RInfo = op->tmp1()->as_register(); 1176 Register klass_RInfo = op->tmp2()->as_register(); 1177 Register dst = op->result_opr()->as_register(); 1178 ciKlass* k = op->klass(); 1179 Register Rtmp1 = noreg; 1180 1181 // check if it needs to be profiled 1182 ciMethodData* md = NULL; 1183 ciProfileData* data = NULL; 1184 1185 const bool should_profile = op->should_profile(); 1186 if (should_profile) { 1187 data_check(op, &md, &data); 1188 } 1189 Label profile_cast_success, profile_cast_failure; 1190 Label *success_target = should_profile ? &profile_cast_success : success; 1191 Label *failure_target = should_profile ?
&profile_cast_failure : failure; 1192 1193 if (obj == k_RInfo) { 1194 k_RInfo = dst; 1195 } else if (obj == klass_RInfo) { 1196 klass_RInfo = dst; 1197 } 1198 if (k->is_loaded() && !UseCompressedClassPointers) { 1199 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1200 } else { 1201 Rtmp1 = op->tmp3()->as_register(); 1202 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1203 } 1204 1205 assert_different_registers(obj, k_RInfo, klass_RInfo); 1206 1207 if (should_profile) { 1208 profile_object(md, data, obj, klass_RInfo, obj_is_null); 1209 } else { 1210 __ beqz(obj, *obj_is_null); 1211 } 1212 1213 typecheck_loaded(op, k, k_RInfo); 1214 __ verify_oop(obj); 1215 1216 if (op->fast_check()) { 1217 // get object class 1218 // not a safepoint as obj null check happens earlier 1219 __ load_klass(t0, obj); 1220 __ bne(t0, k_RInfo, *failure_target, /* is_far */ true); 1221 // successful cast, fall through to profile or jump 1222 } else { 1223 typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target); 1224 } 1225 if (should_profile) { 1226 type_profile(obj, md, klass_RInfo, k_RInfo, data, success, failure, profile_cast_success, profile_cast_failure); 1227 } 1228 __ j(*success); 1229 } 1230 1231 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1232 const bool should_profile = op->should_profile(); 1233 1234 LIR_Code code = op->code(); 1235 if (code == lir_store_check) { 1236 typecheck_lir_store(op, should_profile); 1237 } else if (code == lir_checkcast) { 1238 Register obj = op->object()->as_register(); 1239 Register dst = op->result_opr()->as_register(); 1240 Label success; 1241 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1242 __ bind(success); 1243 if (dst != obj) { 1244 __ mv(dst, obj); 1245 } 1246 } else if (code == lir_instanceof) { 1247 Register obj = op->object()->as_register(); 1248 Register dst = op->result_opr()->as_register(); 1249 Label success, failure, done; 1250 emit_typecheck_helper(op, &success, &failure, &failure); 1251 __ bind(failure); 1252 __ mv(dst, zr); 1253 __ j(done); 1254 __ bind(success); 1255 __ mv(dst, 1); 1256 __ bind(done); 1257 } else { 1258 ShouldNotReachHere(); 1259 } 1260 } 1261 1262 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 1263 assert(VM_Version::supports_cx8(), "wrong machine"); 1264 Register addr; 1265 if (op->addr()->is_register()) { 1266 addr = as_reg(op->addr()); 1267 } else { 1268 assert(op->addr()->is_address(), "what else?"); 1269 LIR_Address* addr_ptr = op->addr()->as_address_ptr(); 1270 assert(addr_ptr->disp() == 0, "need 0 disp"); 1271 assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index"); 1272 addr = as_reg(addr_ptr->base()); 1273 } 1274 Register newval = as_reg(op->new_value()); 1275 Register cmpval = as_reg(op->cmp_value()); 1276 1277 if (op->code() == lir_cas_obj) { 1278 if (UseCompressedOops) { 1279 Register tmp1 = op->tmp1()->as_register(); 1280 assert(op->tmp1()->is_valid(), "must be"); 1281 __ encode_heap_oop(tmp1, cmpval); 1282 cmpval = tmp1; 1283 __ encode_heap_oop(t1, newval); 1284 newval = t1; 1285 caswu(addr, newval, cmpval); 1286 } else { 1287 casl(addr, newval, cmpval); 1288 } 1289 } else if (op->code() == lir_cas_int) { 1290 casw(addr, newval, cmpval); 1291 } else { 1292 casl(addr, newval, cmpval); 1293 } 1294 } 1295 1296 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { 1297 switch (code) { 1298 case lir_abs: __ fabs_d(dest->as_double_reg(), 
value->as_double_reg()); break; 1299 case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), value->as_double_reg()); break; 1300 default: ShouldNotReachHere(); 1301 } 1302 } 1303 1304 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1305 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register"); 1306 Register Rleft = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); 1307 if (dst->is_single_cpu()) { 1308 Register Rdst = dst->as_register(); 1309 if (right->is_constant()) { 1310 int right_const = right->as_jint(); 1311 if (Assembler::operand_valid_for_add_immediate(right_const)) { 1312 logic_op_imm(Rdst, Rleft, right_const, code); 1313 __ addw(Rdst, Rdst, zr); 1314 } else { 1315 __ mv(t0, right_const); 1316 logic_op_reg32(Rdst, Rleft, t0, code); 1317 } 1318 } else { 1319 Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); 1320 logic_op_reg32(Rdst, Rleft, Rright, code); 1321 } 1322 } else { 1323 Register Rdst = dst->as_register_lo(); 1324 if (right->is_constant()) { 1325 long right_const = right->as_jlong(); 1326 if (Assembler::operand_valid_for_add_immediate(right_const)) { 1327 logic_op_imm(Rdst, Rleft, right_const, code); 1328 } else { 1329 __ mv(t0, right_const); 1330 logic_op_reg(Rdst, Rleft, t0, code); 1331 } 1332 } else { 1333 Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); 1334 logic_op_reg(Rdst, Rleft, Rright, code); 1335 } 1336 } 1337 } 1338 1339 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op) { 1340 ShouldNotCallThis(); 1341 } 1342 1343 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 1344 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1345 bool is_unordered_less = (code == lir_ucmp_fd2i); 1346 if (left->is_single_fpu()) { 1347 __ float_cmp(true, is_unordered_less ? -1 : 1, 1348 left->as_float_reg(), right->as_float_reg(), dst->as_register()); 1349 } else if (left->is_double_fpu()) { 1350 __ float_cmp(false, is_unordered_less ? -1 : 1, 1351 left->as_double_reg(), right->as_double_reg(), dst->as_register()); 1352 } else { 1353 ShouldNotReachHere(); 1354 } 1355 } else if (code == lir_cmp_l2i) { 1356 __ cmp_l2i(dst->as_register(), left->as_register_lo(), right->as_register_lo()); 1357 } else { 1358 ShouldNotReachHere(); 1359 } 1360 } 1361 1362 void LIR_Assembler::align_call(LIR_Code code) { 1363 // With RVC a call instruction may get 2-byte aligned. 1364 // The address of the call instruction needs to be 4-byte aligned to 1365 // ensure that it does not span a cache line so that it can be patched. 
1366 __ align(4); 1367 } 1368 1369 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 1370 address call = __ trampoline_call(Address(op->addr(), rtype)); 1371 if (call == NULL) { 1372 bailout("trampoline stub overflow"); 1373 return; 1374 } 1375 add_call_info(code_offset(), op->info()); 1376 } 1377 1378 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 1379 address call = __ ic_call(op->addr()); 1380 if (call == NULL) { 1381 bailout("trampoline stub overflow"); 1382 return; 1383 } 1384 add_call_info(code_offset(), op->info()); 1385 } 1386 1387 void LIR_Assembler::emit_static_call_stub() { 1388 address call_pc = __ pc(); 1389 assert((__ offset() % 4) == 0, "bad alignment"); 1390 address stub = __ start_a_stub(call_stub_size()); 1391 if (stub == NULL) { 1392 bailout("static call stub overflow"); 1393 return; 1394 } 1395 1396 int start = __ offset(); 1397 1398 __ relocate(static_stub_Relocation::spec(call_pc)); 1399 __ emit_static_call_stub(); 1400 1401 assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size() 1402 <= call_stub_size(), "stub too big"); 1403 __ end_a_stub(); 1404 } 1405 1406 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1407 assert(exceptionOop->as_register() == x10, "must match"); 1408 assert(exceptionPC->as_register() == x13, "must match"); 1409 1410 // exception object is not added to oop map by LinearScan 1411 // (LinearScan assumes that no oops are in fixed registers) 1412 info->add_register_oop(exceptionOop); 1413 Runtime1::StubID unwind_id; 1414 1415 // get current pc information 1416 // pc is only needed if the method has an exception handler; the unwind code does not need it. 1417 if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) { 1418 // As no instructions have been generated yet for this LIR node it's 1419 // possible that an oop map already exists for the current offset. 1420 // In that case insert a dummy NOP here to ensure all oop map PCs 1421 // are unique. See JDK-8237483. 1422 __ nop(); 1423 } 1424 int pc_for_athrow_offset = __ offset(); 1425 InternalAddress pc_for_athrow(__ pc()); 1426 int32_t off = 0; 1427 __ la_patchable(exceptionPC->as_register(), pc_for_athrow, off); 1428 __ addi(exceptionPC->as_register(), exceptionPC->as_register(), off); 1429 add_call_info(pc_for_athrow_offset, info); // for exception handler 1430 1431 __ verify_not_null_oop(x10); 1432 // search an exception handler (x10: exception oop, x13: throwing pc) 1433 if (compilation()->has_fpu_code()) { 1434 unwind_id = Runtime1::handle_exception_id; 1435 } else { 1436 unwind_id = Runtime1::handle_exception_nofpu_id; 1437 } 1438 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 1439 __ nop(); 1440 } 1441 1442 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1443 assert(exceptionOop->as_register() == x10, "must match"); 1444 __ j(_unwind_handler_entry); 1445 } 1446 1447 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 1448 Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); 1449 Register dest_reg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo(); 1450 Register count_reg = count->as_register(); 1451 if (dest->is_single_cpu()) { 1452 assert (dest->type() == T_INT, "unexpected result type"); 1453 assert (left->type() == T_INT, "unexpected left type"); 1454 __ andi(t0, count_reg, 31); // should not shift more than 31 bits 1455 switch (code) { 1456 case lir_shl: __ sllw(dest_reg, left_reg, t0); break; 1457 case lir_shr: __ sraw(dest_reg, left_reg, t0); break; 1458 case lir_ushr: __ srlw(dest_reg, left_reg, t0); break; 1459 default: ShouldNotReachHere(); 1460 } 1461 } else if (dest->is_double_cpu()) { 1462 __ andi(t0, count_reg, 63); // should not shift more than 63 bits 1463 switch (code) { 1464 case lir_shl: __ sll(dest_reg, left_reg, t0); break; 1465 case lir_shr: __ sra(dest_reg, left_reg, t0); break; 1466 case lir_ushr: __ srl(dest_reg, left_reg, t0); break; 1467 default: ShouldNotReachHere(); 1468 } 1469 } else { 1470 ShouldNotReachHere(); 1471 } 1472 } 1473 1474 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 1475 Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); 1476 Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo(); 1477 if (dest->is_single_cpu()) { 1478 assert (dest->type() == T_INT, "unexpected result type"); 1479 assert (left->type() == T_INT, "unexpected left type"); 1480 count &= 0x1f; 1481 if (count != 0) { 1482 switch (code) { 1483 case lir_shl: __ slliw(dest_reg, left_reg, count); break; 1484 case lir_shr: __ sraiw(dest_reg, left_reg, count); break; 1485 case lir_ushr: __ srliw(dest_reg, left_reg, count); break; 1486 default: ShouldNotReachHere(); 1487 } 1488 } else { 1489 move_regs(left_reg, dest_reg); 1490 } 1491 } else if (dest->is_double_cpu()) { 1492 count &= 0x3f; 1493 if (count != 0) { 1494 switch (code) { 1495 case lir_shl: __ slli(dest_reg, left_reg, count); break; 1496 case lir_shr: __ srai(dest_reg, left_reg, count); break; 1497 case lir_ushr: __ srli(dest_reg, left_reg, count); break; 1498 default: ShouldNotReachHere(); 1499 } 1500 } else { 1501 move_regs(left->as_register_lo(), dest->as_register_lo()); 1502 } 1503 } else { 1504 ShouldNotReachHere(); 1505 } 1506 } 1507 1508 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 1509 Register obj = op->obj_opr()->as_register(); // may not be an oop 1510 Register hdr = op->hdr_opr()->as_register(); 1511 Register lock = op->lock_opr()->as_register(); 1512 if (UseHeavyMonitors) { 1513 __ j(*op->stub()->entry()); 1514 } else if (op->code() == lir_lock) { 1515 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 1516 // add debug info for NullPointerException only if one is possible 1517 int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); 1518 if (op->info() != NULL) { 1519 add_debug_info_for_null_check(null_check_offset, op->info()); 1520 } 1521 } else if (op->code() == lir_unlock) { 1522 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 1523 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 1524 } else { 1525 Unimplemented(); 1526 } 1527 __ bind(*op->stub()->continuation()); 1528 } 1529 1530 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 1531 Register obj = op->obj()->as_pointer_register(); 1532 Register result = op->result_opr()->as_pointer_register(); 1533 1534 CodeEmitInfo* info = op->info(); 1535 if (info != NULL) { 1536 
add_debug_info_for_null_check_here(info); 1537 } 1538 1539 if (UseCompressedClassPointers) { 1540 __ lwu(result, Address(obj, oopDesc::klass_offset_in_bytes())); 1541 __ decode_klass_not_null(result); 1542 } else { 1543 __ ld(result, Address(obj, oopDesc::klass_offset_in_bytes())); 1544 } 1545 } 1546 1547 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 1548 ciMethod* method = op->profiled_method(); 1549 int bci = op->profiled_bci(); 1550 1551 // Update counter for all call types 1552 ciMethodData* md = method->method_data_or_null(); 1553 guarantee(md != NULL, "Sanity"); 1554 ciProfileData* data = md->bci_to_data(bci); 1555 assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); 1556 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 1557 Register mdo = op->mdo()->as_register(); 1558 __ mov_metadata(mdo, md->constant_encoding()); 1559 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1560 // Perform additional virtual call profiling for invokevirtual and 1561 // invokeinterface bytecodes 1562 if (op->should_profile_receiver_type()) { 1563 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 1564 Register recv = op->recv()->as_register(); 1565 assert_different_registers(mdo, recv); 1566 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 1567 ciKlass* known_klass = op->known_holder(); 1568 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 1569 // We know the type that will be seen at this call site; we can 1570 // statically update the MethodData* rather than needing to do 1571 // dynamic tests on the receiver type 1572 // NOTE: we should probably put a lock around this search to 1573 // avoid collisions by concurrent compilations 1574 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 1575 uint i; 1576 for (i = 0; i < VirtualCallData::row_limit(); i++) { 1577 ciKlass* receiver = vc_data->receiver(i); 1578 if (known_klass->equals(receiver)) { 1579 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 1580 __ add_memory_int64(data_addr, DataLayout::counter_increment); 1581 return; 1582 } 1583 } 1584 1585 // Receiver type not found in profile data; select an empty slot 1586 // Note that this is less efficient than it should be because it 1587 // always does a write to the receiver part of the 1588 // VirtualCallData rather than just the first time 1589 for (i = 0; i < VirtualCallData::row_limit(); i++) { 1590 ciKlass* receiver = vc_data->receiver(i); 1591 if (receiver == NULL) { 1592 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 1593 __ mov_metadata(t1, known_klass->constant_encoding()); 1594 __ sd(t1, recv_addr); 1595 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 1596 __ add_memory_int64(data_addr, DataLayout::counter_increment); 1597 return; 1598 } 1599 } 1600 } else { 1601 __ load_klass(recv, recv); 1602 Label update_done; 1603 type_profile_helper(mdo, md, data, recv, &update_done); 1604 // Receiver did not match any saved receiver and there is no empty row for it. 1605 // Increment total counter to indicate polymorphic case. 
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  guarantee(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type
      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add_memory_int64(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot
      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(t1, known_klass->constant_encoding());
          __ sd(t1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add_memory_int64(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ add_memory_int64(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ add_memory_int64(counter_addr, DataLayout::counter_increment);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); }

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }

void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
                                   Register tmp, Label &next, Label &none,
                                   Address mdo_addr) {
  if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
    if (exact_klass != NULL) {
      __ mov_metadata(tmp, exact_klass->constant_encoding());
    } else {
      __ load_klass(tmp, tmp);
    }

    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    // klass seen before, nothing to do. The unknown bit may have been
    // set already but no need to check.
    __ beqz(t0, next);

    // already unknown. Nothing to do anymore.
    __ andi(t0, tmp, TypeEntries::type_unknown);
    __ bnez(t0, next);

    if (TypeEntries::is_type_none(current_klass)) {
      __ beqz(t1, none);
      __ li(t0, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, none);
      // There is a chance that the checks above (re-reading profiling
      // data from memory) fail if another thread has just set the
      // profiling to this obj's klass
      __ membar(MacroAssembler::LoadLoad);
      __ ld(t1, mdo_addr);
      __ xorr(tmp, tmp, t1);
      __ andi(t0, tmp, TypeEntries::type_klass_mask);
      __ beqz(t0, next);
    }
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ andi(t0, tmp, TypeEntries::type_unknown);
    __ bnez(t0, next);
  }

  // different than before. Cannot keep accurate profile.
  __ ld(t1, mdo_addr);
  __ ori(t1, t1, TypeEntries::type_unknown);
  __ sd(t1, mdo_addr);

  if (TypeEntries::is_type_none(current_klass)) {
    __ j(next);

    __ bind(none);
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
  }
}
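
// A profiled type entry packs a Klass* together with two low-order flag bits
// (TypeEntries::null_seen and TypeEntries::type_unknown). The xorr/andi sequences
// in these helpers therefore compare the stored klass while masking out the flag
// bits rather than doing a plain equality check.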
void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                                      Address mdo_addr, Label &next) {
  // There's a single possible klass at this profile point
  assert(exact_klass != NULL, "should be");
  if (TypeEntries::is_type_none(current_klass)) {
    __ mov_metadata(tmp, exact_klass->constant_encoding());
    __ ld(t1, mdo_addr);
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    __ beqz(t0, next);
#ifdef ASSERT
    {
      Label ok;
      __ ld(t0, mdo_addr);
      __ beqz(t0, ok);
      __ li(t1, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, ok);
      // may have been set by another thread
      __ membar(MacroAssembler::LoadLoad);
      __ mov_metadata(t0, exact_klass->constant_encoding());
      __ ld(t1, mdo_addr);
      __ xorr(t1, t0, t1);
      __ andi(t1, t1, TypeEntries::type_mask);
      __ beqz(t1, ok);

      __ stop("unexpected profiling mismatch");
      __ bind(ok);
    }
#endif
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ andi(t0, tmp, TypeEntries::type_unknown);
    __ bnez(t0, next);

    __ ori(tmp, tmp, TypeEntries::type_unknown);
    __ sd(tmp, mdo_addr);
  }
}

void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_klass,
                               Address mdo_addr, bool do_update, Label &next) {
  __ bnez(tmp, update);
  if (!TypeEntries::was_null_seen(current_klass)) {
    __ ld(t1, mdo_addr);
    __ ori(t1, t1, TypeEntries::null_seen);
    __ sd(t1, mdo_addr);
  }
  if (do_update) {
    __ j(next);
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert_different_registers(tmp, t0, t1, mdo_addr.base());

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mv(tmp, obj);
  }
  if (do_null) {
    check_null(tmp, update, current_klass, mdo_addr, do_update, next);
#ifdef ASSERT
  } else {
    __ bnez(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      check_exact_klass(tmp, exact_klass);
    }
#endif
    if (!no_conflict) {
      check_conflict(exact_klass, current_klass, tmp, next, none, mdo_addr);
    } else {
      check_no_conflict(exact_klass, current_klass, tmp, mdo_addr, next);
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

void LIR_Assembler::align_backward_branch_target() { }

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fneg_s(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fneg_d(dest->as_double_reg(), left->as_double_reg());
  }
}
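
// Materializes the effective address described by a LIR_Address into a plain
// register. When the destination register is also the address base and the
// (possibly folded) displacement does not fit in a 12-bit immediate, the address
// is formed in t0 first so the base register is not clobbered before it is read.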
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest,
                         LIR_PatchCode patch_code, CodeEmitInfo* info) {
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  LIR_Address* adr = addr->as_address_ptr();
  Register dst = dest->as_register_lo();

  assert_different_registers(dst, t0);
  if (adr->base()->is_valid() && dst == adr->base()->as_pointer_register() && (!adr->index()->is_cpu_register())) {
    int scale = adr->scale();
    intptr_t offset = adr->disp();
    LIR_Opr index_op = adr->index();
    if (index_op->is_constant()) {
      offset += ((intptr_t)index_op->as_constant_ptr()->as_jint()) << scale;
    }

    if (!is_imm_in_range(offset, 12, 0)) {
      __ la(t0, as_Address(adr));
      __ mv(dst, t0);
      return;
    }
  }

  __ la(dst, as_Address(adr));
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb != NULL) {
    __ far_call(RuntimeAddress(dest));
  } else {
    int32_t offset = 0;
    __ la_patchable(t0, RuntimeAddress(dest), offset);
    __ jalr(x1, t0, offset);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info, /* pop_fpu_stack */ false, /* wide */ false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  Label ok;
  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    bool is_unordered = false;
    LIR_Condition cond = op->condition();
    emit_branch(cond, op->in_opr1(), op->in_opr2(), ok, /* is_far */ false,
                /* is_unordered */(cond == lir_cond_greaterEqual || cond == lir_cond_greater) ? false : true);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(MacroAssembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mv(result_reg->as_register(), xthread);
}

void LIR_Assembler::peephole(LIR_List *lir) {}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  get_op(type);

  switch (code) {
    case lir_xadd:
      {
        RegisterOrConstant inc;
        Register tmp = as_reg(tmp_op);
        Register dst = as_reg(dest);
        if (data->is_constant()) {
          inc = RegisterOrConstant(as_long(data));
          assert_different_registers(dst, addr.base(), tmp);
          assert_different_registers(tmp, t0);
        } else {
          inc = RegisterOrConstant(as_reg(data));
          assert_different_registers(inc.as_register(), dst, addr.base(), tmp);
        }
        __ la(tmp, addr);
        (_masm->*add)(dst, inc, tmp);
        break;
      }
    case lir_xchg:
      {
        Register tmp = tmp_op->as_register();
        Register obj = as_reg(data);
        Register dst = as_reg(dest);
        if (is_oop && UseCompressedOops) {
          __ encode_heap_oop(t0, obj);
          obj = t0;
        }
        assert_different_registers(obj, addr.base(), tmp, dst);
        __ la(tmp, addr);
        (_masm->*xchg)(dst, obj, tmp);
        if (is_oop && UseCompressedOops) {
          __ decode_heap_oop(dst);
        }
      }
      break;
    default:
      ShouldNotReachHere();
  }
  __ membar(MacroAssembler::AnyAny);
}

int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}
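
// array_element_size() above returns log2 of the element size in bytes, i.e. the
// shift amount that scaled-index address computations (e.g. shadd) expect.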

// Helper functions which check for overflow and set bailout if it occurs.
// They always return a valid embeddable pointer, but in the bailout case
// the pointer won't be to unique storage.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::caswu(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::uint32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int64, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;

  switch (patching_id(info)) {
    case PatchingStub::access_field_id:
      target = Runtime1::entry_for(Runtime1::access_field_patching_id);
      break;
    case PatchingStub::load_klass_id:
      target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
      break;
    case PatchingStub::load_mirror_id:
      target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
      break;
    case PatchingStub::load_appendix_id:
      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
      break;
    default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::check_exact_klass(Register tmp, ciKlass* exact_klass) {
  Label ok;
  __ load_klass(tmp, tmp);
  __ mov_metadata(t0, exact_klass->constant_encoding());
  __ beq(tmp, t0, ok);
  __ stop("exact klass and actual klass differ");
  __ bind(ok);
}

void LIR_Assembler::get_op(BasicType type) {
  switch (type) {
    case T_INT:
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
      break;
    case T_LONG:
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops) {
        xchg = &MacroAssembler::atomic_xchgalwu;
        add = &MacroAssembler::atomic_addalw;
      } else {
        xchg = &MacroAssembler::atomic_xchgal;
        add = &MacroAssembler::atomic_addal;
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
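
// get_op() above selects the 32-bit or 64-bit atomic exchange/add helpers that
// atomic_op() invokes through the xchg/add member pointers. With compressed oops
// the 32-bit, zero-extending exchange variant is used, since narrow oops are
// stored as 32-bit values.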
// emit_opTypeCheck sub functions
void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile) {
  Register value = op->object()->as_register();
  Register array = op->array()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();

  CodeStub* stub = op->stub();

  // check if it needs to be profiled
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label profile_cast_success, profile_cast_failure, done;
  Label *success_target = should_profile ? &profile_cast_success : &done;
  Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

  if (should_profile) {
    profile_object(md, data, value, klass_RInfo, &done);
  } else {
    __ beqz(value, done);
  }

  add_debug_info_for_null_check_here(op->info_for_exception());
  __ load_klass(k_RInfo, array);
  __ load_klass(klass_RInfo, value);

  lir_store_slowcheck(k_RInfo, klass_RInfo, Rtmp1, success_target, failure_target);

  // fall through to the success case
  if (should_profile) {
    Register mdo = klass_RInfo;
    Register recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, value);
    type_profile_helper(mdo, md, data, recv, &done);
    __ j(done);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ ld(t1, counter_addr);
    __ addi(t1, t1, -DataLayout::counter_increment);
    __ sd(t1, counter_addr);
    __ j(*stub->entry());
  }

  __ bind(done);
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::type_profile(Register obj, ciMethodData* md, Register klass_RInfo, Register k_RInfo,
                                 ciProfileData* data, Label* success, Label* failure,
                                 Label& profile_cast_success, Label& profile_cast_failure) {
  Register mdo = klass_RInfo;
  Register recv = k_RInfo;
  __ bind(profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  __ load_klass(recv, obj);
  Label update_done;
  type_profile_helper(mdo, md, data, recv, success);
  __ j(*success);

  __ bind(profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ ld(t0, counter_addr);
  __ addi(t0, t0, -DataLayout::counter_increment);
  __ sd(t0, counter_addr);
  __ j(*failure);
}
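
// Runs the inline subtype check fast path and, when that cannot decide, calls the
// slow_subtype_check_id stub with the sub- and super-klass passed on the stack;
// the stub's boolean result is read back from the super-klass slot, and zero sends
// control to the failure target.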
void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1,
                                        Label* success_target, Label* failure_target) {
  // get instance klass (it's already uncompressed)
  __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
  // perform the fast part of the checking logic
  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
  // call out-of-line instance of __ check_klass_subtype_slow_path(...)
  __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
  __ sd(k_RInfo, Address(sp, 0));             // super klass
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  // load result to k_RInfo
  __ ld(k_RInfo, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
  // result is a boolean
  __ beqz(k_RInfo, *failure_target, /* is_far */ true);
}

void LIR_Assembler::const2reg_helper(LIR_Opr src) {
  switch (src->as_constant_ptr()->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA:
      const2reg(src, FrameMap::t0_opr, lir_patch_none, NULL);
      break;
    case T_LONG:
      const2reg(src, FrameMap::t0_long_opr, lir_patch_none, NULL);
      break;
    case T_FLOAT:
    case T_DOUBLE:
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_reg32(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andrw(dst, left, right); break;
    case lir_logic_or:  __ orrw (dst, left, right); break;
    case lir_logic_xor: __ xorrw(dst, left, right); break;
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_reg(Register dst, Register left, Register right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andr(dst, left, right); break;
    case lir_logic_or:  __ orr (dst, left, right); break;
    case lir_logic_xor: __ xorr(dst, left, right); break;
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op_imm(Register dst, Register left, int right, LIR_Code code) {
  switch (code) {
    case lir_logic_and: __ andi(dst, left, right); break;
    case lir_logic_or:  __ ori (dst, left, right); break;
    case lir_logic_xor: __ xori(dst, left, right); break;
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ sd(r, Address(sp, offset_from_rsp_in_bytes));
}

void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ li(t0, c);
  __ sd(t0, Address(sp, offset_from_rsp_in_bytes));
}

#undef __