/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_s390.inline.hpp"

#define __ _masm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE), _masm) : _masm)->
#endif

//------------------------------------------------------------

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  // Not used on ZARCH_64.
  ShouldNotCallThis();
  return false;
}

LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::Z_R2_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::Z_R2_opr;
}

int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}

// Inline cache check: done before the frame is built.
// The inline cached class is in Z_inline_cache (Z_R9).
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = Z_R1_scratch;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
  __ z_br(klass);

  __ bind(L_skip_barrier);
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");

    const int locals_space = BytesPerWord * method()->max_locals();
    int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1);
    bool large_offset = !Immediate::is_simm20(monitor_offset + BytesPerWord) && number_of_locks > 0;

    if (large_offset) {
      // z_lg can only handle a displacement that fits into a 20-bit signed immediate.
      __ z_algfi(OSR_buf, locals_space);
      monitor_offset -= locals_space;
    }

    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
      // Verify the interpreter's monitor has a non-null object.
      __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__);
      // Copy the lock field into the compiled activation.
      __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
      __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
    }

    if (large_offset) {
      __ z_slgfi(OSR_buf, locals_space);
    }
  }
}

// --------------------------------------------------------------------------------------------

address LIR_Assembler::emit_call_c(address a) {
  __ align_call_far_patchable(__ pc());
  address call_addr = __ call_c_opt(a);
  if (call_addr == nullptr) {
    bailout("const section overflow");
  }
  return call_addr;
}

int LIR_Assembler::emit_exception_handler() {
  // Generate code for exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  address a = Runtime1::entry_for (StubId::c1_handle_exception_from_callee_id);
  address call_addr = emit_call_c(a);
  CHECK_BAILOUT_(-1);
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
  Register Rtmp1 = Z_R11;
  Register Rtmp2 = Z_R12;

  // Fetch the exception from TLS and clear out exception related thread state.
  Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
  Address exc_pc_addr  = Address(Z_thread, JavaThread::exception_pc_offset());
  __ z_lg(Z_EXC_OOP, exc_oop_addr);
  __ clear_mem(exc_oop_addr, sizeof(oop));
  __ clear_mem(exc_pc_addr, sizeof(intptr_t));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Z_EXC_OOP);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
  }

  // Perform needed unlocking.
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    // StubId::c1_monitorexit_id expects lock address in Z_R1_scratch.
    LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
    monitor_address(0, lock);
    stub = new MonitorExitStub(lock, true, 0);
    __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    ShouldNotReachHere(); // Not supported.
#if 0
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception.
  }

  // Remove the activation and dispatch to the unwind handler.
  __ pop_frame();
  __ z_lg(Z_EXC_PC, _z_common_abi(return_pc), Z_SP);

  // Z_EXC_OOP: exception oop
  // Z_EXC_PC: exception pc

  // Dispatch to the unwind logic.
  __ load_const_optimized(Z_R5, Runtime1::entry_for (StubId::c1_unwind_exception_id));
  __ z_br(Z_R5);

  // Emit the slow path assembly.
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}

int LIR_Assembler::emit_deopt_handler() {
  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // Size must be constant (see HandlerImpl::emit_deopt_handler).
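  // Using the fixed-size load_const (rather than load_const_optimized) keeps the
  // handler at a constant size, which the guarantee below relies on.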
  __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1_scratch);
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == nullptr) {
    __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
  } else {
    AddressLiteral a = __ allocate_oop_address(o);
    bool success = __ load_oop_from_toc(reg, a, reg);
    if (!success) {
      bailout("const section overflow");
    }
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // The null will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
  bool success = __ set_metadata_constant(md, reg);
  if (!success) {
    bailout("const section overflow");
    return;
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(nullptr);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // The null will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad: {
      const FloatRegister opr1 = op->in_opr1()->as_double_reg(),
                          opr2 = op->in_opr2()->as_double_reg(),
                          opr3 = op->in_opr3()->as_double_reg(),
                          res  = op->result_opr()->as_double_reg();
      __ z_madbr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ldr(res, opr3); }
    } break;
    case lir_fmaf: {
      const FloatRegister opr1 = op->in_opr1()->as_float_reg(),
                          opr2 = op->in_opr2()->as_float_reg(),
                          opr3 = op->in_opr3()->as_float_reg(),
                          res  = op->result_opr()->as_float_reg();
      __ z_maebr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ler(res, opr3); }
    } break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  { _branch_target_blocks.append(op->block()); }
  if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); }
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); }
    __ branch_optimized(Assembler::bcondAlways, *(op->label()));
  } else {
    Assembler::branch_condition acond = Assembler::bcondZero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
    }
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::bcondEqual;    break;
      case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; break;
      case lir_cond_less:         acond = Assembler::bcondLow;      break;
      case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  break;
      case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   break;
      case lir_cond_greater:      acond = Assembler::bcondHigh;     break;
      case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  break;
      case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   break;
      default:                    ShouldNotReachHere();
    }
    __ branch_optimized(acond, *(op->label()));
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT);
      break;

    case Bytecodes::_l2i:
      __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG);
      break;

    case Bytecodes::_i2b:
      __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2c:
      __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2s:
      __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT);
      break;

    case Bytecodes::_f2d:
      assert(dest->is_double_fpu(), "check");
      __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT);
      break;

    case Bytecodes::_d2f:
      assert(dest->is_single_fpu(), "check");
      __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE);
      break;

    case Bytecodes::_i2f:
      __ z_cefbr(dest->as_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ z_cdfbr(dest->as_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ z_cegbr(dest->as_float_reg(), src->as_register_lo());
      break;
    case Bytecodes::_l2d:
      __ z_cdgbr(dest->as_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_f2l: {
      Label done;
      FloatRegister Rsrc = src->as_float_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false);
      __ z_cebr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_f2i) {
        __ z_cfebr(Rdst, Rsrc, Assembler::to_zero);
      } else { // op->bytecode() == Bytecodes::_f2l
        __ z_cgebr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l: {
      Label done;
      FloatRegister Rsrc = src->as_double_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false); // Don't set CC.
      __ z_cdbr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_d2i) {
        __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero);
      } else { // Bytecodes::_d2l
        __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::align_call(LIR_Code code) {
  // End of call instruction must be 4 byte aligned.
  int offset = __ offset();
  switch (code) {
    case lir_icvirtual_call:
      offset += MacroAssembler::load_const_from_toc_size();
      // no break
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::call_far_pcrelative_displacement_offset;
      break;
    default: ShouldNotReachHere();
  }
  if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment - 1)) != 0) {
    __ nop();
  }
}

void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned (offset=%d)", __ offset());
  assert(rtype == relocInfo::none ||
         rtype == relocInfo::opt_virtual_call_type ||
         rtype == relocInfo::static_call_type, "unexpected rtype");
  // Prepend each BRASL with a nop.
  __ relocate(rtype);
  __ z_nop();
  __ z_brasl(Z_R14, op->addr());
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address virtual_call_oop_addr = nullptr;
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  virtual_call_oop_addr = __ pc();
  bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  // CALL to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
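  // The virtual_call relocation below records the pc of the inline cache load
  // above, so the runtime can locate and patch the IC data once the call site
  // is resolved.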
  __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
  call(op, relocInfo::none);
}

void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ z_lgr(to_reg, from_reg);
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  unsigned int lmem = 0;
  unsigned int lcon = 0;
  int64_t cbits = 0;
  Address dest_addr;
  switch (c->type()) {
    case T_INT:   // fall through
    case T_FLOAT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_ADDRESS:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_OBJECT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      if (c->as_jobject() == nullptr) {
        __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
      } else {
        jobject2reg(c->as_jobject(), Z_R1_scratch);
        __ reg2mem_opt(Z_R1_scratch, dest_addr, true);
      }
      return;

    case T_LONG:  // fall through
    case T_DOUBLE:
      dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
      break;

    default:
      ShouldNotReachHere();
  }

  __ store_const(dest_addr, cbits, lmem, lcon);
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");

  LIR_Const* c = src->as_constant_ptr();
  Address addr = as_Address(dest->as_address_ptr());

  int store_offset = -1;

  if (dest->as_address_ptr()->index()->is_valid()) {
    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
        __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_st(Z_R0_scratch, addr);
        } else {
          __ z_sty(Z_R0_scratch, addr);
        }
        break;

      case T_ADDRESS:
        __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == nullptr) {
          if (UseCompressedOops && !wide) {
            __ clear_reg(Z_R1_scratch, false);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            __ clear_reg(Z_R1_scratch, true);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        } else {
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG:    // fall through
      case T_DOUBLE:
        __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_stc(Z_R0_scratch, addr);
        } else {
          __ z_stcy(Z_R0_scratch, addr);
        }
        break;

      case T_CHAR:    // fall through
      case T_SHORT:
        __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_sth(Z_R0_scratch, addr);
        } else {
          __ z_sthy(Z_R0_scratch, addr);
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else { // no index

    unsigned int lmem = 0;
    unsigned int lcon = 0;
    int64_t cbits = 0;

    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
        lmem = 4; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_ADDRESS:
        lmem = 8; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == nullptr) {
          if (UseCompressedOops && !wide) {
            store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
          } else {
            store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
          }
        } else {
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG:    // fall through
      case T_DOUBLE:
        lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
        break;

      case T_CHAR:    // fall through
      case T_SHORT:
        lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
        break;

      default:
        ShouldNotReachHere();
    }

    if (store_offset == -1) {
      store_offset = __ store_const(addr, cbits, lmem, lcon);
      assert(store_offset >= 0, "check");
    }
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(store_offset, info);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        metadata2reg(c->as_metadata(), dest->as_register());
      }
      break;
    }

    case T_FLOAT: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ float_constant(c->as_jfloat());
      if (const_addr == nullptr) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_single_fpu()) {
        __ z_ley(dest->as_float_reg(), displ, toc_reg);
      } else {
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        __ z_ly(dest->as_register(), displ, toc_reg);
      }
    }
    break;

    case T_DOUBLE: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ double_constant(c->as_jdouble());
      if (const_addr == nullptr) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_double_fpu()) {
        __ z_ldy(dest->as_double_reg(), displ, toc_reg);
      } else {
        assert(dest->is_double_cpu(), "Must be a long register.");
        __ z_lg(dest->as_register_lo(), displ, toc_reg);
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  if (addr->base()->is_illegal()) {
    Unimplemented();
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp();
    return Address(base, addr_offset);
  } else {
    ShouldNotReachHere();
    return Address();
  }
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, false);
      __ reg2mem_opt(tmp, to, false);
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotCallThis();
  return Address(); // unused
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  ShouldNotCallThis();
  return Address(); // unused
}

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
                            CodeEmitInfo* info, bool wide) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
    disp_reg = addr->index()->as_pointer_register();
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  bool short_disp = Immediate::is_uimm12(disp_value);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE   : __ z_lb(dest->as_register(),   disp_value, disp_reg, src); break;
    case T_CHAR   : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
    case T_SHORT  :
      if (short_disp) {
        __ z_lh(dest->as_register(), disp_value, disp_reg, src);
      } else {
        __ z_lhy(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_INT    :
      if (short_disp) {
        __ z_l(dest->as_register(), disp_value, disp_reg, src);
      } else {
        __ z_ly(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ADDRESS:
      __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      break;
    case T_ARRAY  : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ oop_decoder(dest->as_register(), dest->as_register(), true);
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      __ verify_oop(dest->as_register(), FILE_AND_LINE);
      break;
    }
    case T_FLOAT:
      if (short_disp) {
        __ z_le(dest->as_float_reg(), disp_value, disp_reg, src);
      } else {
        __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_ld(dest->as_double_reg(), disp_value, disp_reg, src);
      } else {
        __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_LONG   : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
    default       : ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != nullptr) add_debug_info_for_null_check(offset, info);
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
      __ verify_oop(dest->as_register(), FILE_AND_LINE);
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
    } else {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
    }
  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register(), FILE_AND_LINE);
      __ reg2mem_opt(src->as_register(), dst, true);
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ reg2mem_opt(src->as_register(), dst, true);
    } else {
      __ reg2mem_opt(src->as_register(), dst, false);
    }
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
    __ reg2mem_opt(src->as_register_lo(), dstLO, true);
  } else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ z_lgr(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest_opr->as_address_ptr();

  Register dest = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(dest, FILE_AND_LINE);
  }

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption");
  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
    disp_reg = addr->index()->as_pointer_register();
  }

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  if (is_reference_type(type)) {
    __ verify_oop(from->as_register(), FILE_AND_LINE);
  }

  bool short_disp = Immediate::is_uimm12(disp_value);

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :
      if (short_disp) {
        __ z_stc(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_CHAR  : // fall through
    case T_SHORT :
      if (short_disp) {
        __ z_sth(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_sthy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_st(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_sty(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_LONG  :  __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break;
    case T_ADDRESS: __ z_stg(from->as_register(),    disp_value, disp_reg, dest); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          Register compressed_src = Z_R14;
          __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
          offset = code_offset();
          if (short_disp) {
            __ z_st(compressed_src, disp_value, disp_reg, dest);
          } else {
            __ z_sty(compressed_src, disp_value, disp_reg, dest);
          }
        } else {
          __ z_stg(from->as_register(), disp_value, disp_reg, dest);
        }
        break;
      }
    case T_FLOAT :
      if (short_disp) {
        __ z_ste(from->as_float_reg(), disp_value, disp_reg, dest);
      } else {
        __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_std(from->as_double_reg(), disp_value, disp_reg, dest);
      } else {
        __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest);
      }
      break;
    default: ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, dest, info);
  }

  if (info != nullptr) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() ||
         (result->is_single_cpu() && result->as_register() == Z_R2) ||
         (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
         (result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
         (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");

  __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));

  // Pop the frame before the safepoint code.
  __ pop_frame_restore_retPC(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(Z_R14);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(Z_R1_scratch);

  __ z_br(Z_R14); // Return to caller.
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register_lo();
  __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset()));
  guarantee(info != nullptr, "Shouldn't be null");
  add_debug_info_for_branch(info);
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);
  return offset;
}

void LIR_Assembler::emit_static_call_stub() {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.

  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));

  // See also Matcher::interpreter_method_reg().
  AddressLiteral meta = __ allocate_metadata_address(nullptr);
  bool success = __ load_const_from_toc(Z_method, meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  __ z_br(Z_R1);
  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub(); // Update current stubs pointer and restore insts_end.
}
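
// Emit a compare of opr1 with opr2; only the condition code is set here. The
// conditional branch or conditional move that consumes the result is emitted
// separately (see emit_opBranch() and cmove()).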
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ z_clgr(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        if (unsigned_comp) {
          __ z_clr(reg1, opr2->as_register());
        } else {
          __ z_cr(reg1, opr2->as_register());
        }
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        if (unsigned_comp) {
          __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        if (unsigned_comp) {
          __ z_clfi(reg1, c->as_jint());
        } else {
          __ z_cfi(reg1, c->as_jint());
        }
      } else if (c->type() == T_METADATA) {
        // We only need, for now, comparison with null for metadata.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ z_cghi(reg1, 0);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // On 64 bit, oops are a single register.
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ z_ltgr(reg1, reg1);
        } else {
          jobject2reg(o, Z_R1_scratch);
          __ z_cgr(reg1, Z_R1_scratch);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      if (unsigned_comp) {
        __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
      } else {
        __ z_cy(reg1, as_Address(opr2->as_address_ptr()));
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(!unsigned_comp, "unexpected");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ z_cgr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ z_ltgr(xlo, xlo);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_fpu()) {
    if (opr2->is_single_fpu()) {
      __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->single_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_ceb(opr1->as_float_reg(), addr);
      } else {
        __ z_ley(Z_fscratch_1, addr);
        __ z_cebr(opr1->as_float_reg(), Z_fscratch_1);
      }
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_double_fpu()) {
      __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->double_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_cdb(opr1->as_double_reg(), addr);
      } else {
        __ z_ldy(Z_fscratch_1, addr);
        __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1);
      }
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  Label done;
  Register dreg = dst->as_register();

  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    assert((left->is_single_fpu() && right->is_single_fpu()) ||
           (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types");
    bool is_single = left->is_single_fpu();
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg();
    FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg();
    if (is_single) {
      __ z_cebr(lreg, rreg);
    } else {
      __ z_cdbr(lreg, rreg);
    }
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one,       is_unordered_less ? Assembler::bcondHigh            : Assembler::bcondHighOrNotOrdered);
      __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow);
    } else {
      __ clear_reg(dreg, true, false);
      __ z_bre(done); // if (left == right) dst = 0

      // if (left > right), or if code is cmpg and the operands are unordered: dst := 1
      __ z_lhi(dreg, 1);
      __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done);

      // if (left < right), or if code is cmpl and the operands are unordered: dst := -1
      __ z_lhi(dreg, -1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one, Assembler::bcondHigh);
      __ z_locgr(dreg, minus_one, Assembler::bcondLow);
    } else {
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(dreg,  0); // eq value
      __ z_bre(done);
      __ z_lghi(dreg,  1); // gt value
      __ z_brh(done);
      __ z_lghi(dreg, -1); // lt value
    }
  }
  __ bind(done);
}

// result = condition ? opr1 : opr2
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on s390");

  Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::bcondEqual;    ncond = Assembler::bcondNotEqual; break;
    case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual;    break;
    case lir_cond_less:         acond = Assembler::bcondLow;      ncond = Assembler::bcondNotLow;   break;
    case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    case lir_cond_greater:      acond = Assembler::bcondHigh;     ncond = Assembler::bcondNotHigh;  break;
    case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) {
    // Optimized version that does not require a branch.
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register(), opr2->as_register(), ncond);
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond);
    } else if (opr2->is_single_stack()) {
      __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond);
    } else if (opr2->is_double_stack()) {
      __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Label skip;
    __ z_brc(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info) {
  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ z_ar (lreg, rreg); break;
        case lir_sub: __ z_sr (lreg, rreg); break;
        case lir_mul: __ z_msr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ z_ay(lreg, raddr); break;
        case lir_sub: __ z_sy(lreg, raddr); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add:
          __ add2reg_32(lreg, c);
          break;
        case lir_sub:
          __ add2reg_32(lreg, java_negate(c));
          break;
        case lir_mul: __ z_msfi(lreg, c); break;
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ z_agr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ z_sgr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ z_msgr(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      switch (code) {
        case lir_add: __ z_agfi(lreg_lo, c); break;
        case lir_sub:
          if (c != min_jint) {
            __ z_agfi(lreg_lo, -c);
          } else {
            // -min_jint cannot be represented as simm32 in z_agfi
            //   min_jint sign extended:      0xffffffff80000000
            //   -min_jint as 64 bit integer: 0x0000000080000000
            //   0x80000000 can be represented as uimm32 in z_algfi
            //   lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000
            __ z_algfi(lreg_lo, UCONST64(0x80000000));
          }
          break;
        case lir_mul: __ z_msgfi(lreg_lo, c); break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_single_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->single_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_aebr(lreg, rreg);  break;
        case lir_sub: __ z_sebr(lreg, rreg);  break;
        case lir_mul: __ z_meebr(lreg, rreg); break;
        case lir_div: __ z_debr(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_aeb(lreg, raddr);  break;
        case lir_sub: __ z_seb(lreg, raddr);  break;
        case lir_mul: __ z_meeb(lreg, raddr); break;
        case lir_div: __ z_deb(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_double_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_double_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->double_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_adbr(lreg, rreg); break;
        case lir_sub: __ z_sdbr(lreg, rreg); break;
        case lir_mul: __ z_mdbr(lreg, rreg); break;
        case lir_div: __ z_ddbr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_adb(lreg, raddr); break;
        case lir_sub: __ z_sdb(lreg, raddr); break;
        case lir_mul: __ z_mdb(lreg, raddr); break;
        case lir_div: __ z_ddb(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    assert(code == lir_add, "unsupported operation");
    assert(right->is_constant(), "unsupported operand");
    jint c = right->as_constant_ptr()->as_jint();
    LIR_Address* lir_addr = left->as_address_ptr();
    Address addr = as_Address(lir_addr);
    switch (lir_addr->type()) {
      case T_INT:
        __ add2mem_32(addr, c, Z_R1_scratch);
        break;
      case T_LONG:
        __ add2mem_64(addr, c, Z_R1_scratch);
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_sqdbr(dst_reg, src_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_lpdbr(dst_reg, src_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
(left->is_single_cpu()) { 1703 Register reg = left->as_register(); 1704 if (right->is_constant()) { 1705 int val = right->as_constant_ptr()->as_jint(); 1706 switch (code) { 1707 case lir_logic_and: __ z_nilf(reg, val); break; 1708 case lir_logic_or: __ z_oilf(reg, val); break; 1709 case lir_logic_xor: __ z_xilf(reg, val); break; 1710 default: ShouldNotReachHere(); 1711 } 1712 } else if (right->is_stack()) { 1713 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1714 switch (code) { 1715 case lir_logic_and: __ z_ny(reg, raddr); break; 1716 case lir_logic_or: __ z_oy(reg, raddr); break; 1717 case lir_logic_xor: __ z_xy(reg, raddr); break; 1718 default: ShouldNotReachHere(); 1719 } 1720 } else { 1721 Register rright = right->as_register(); 1722 switch (code) { 1723 case lir_logic_and: __ z_nr(reg, rright); break; 1724 case lir_logic_or : __ z_or(reg, rright); break; 1725 case lir_logic_xor: __ z_xr(reg, rright); break; 1726 default: ShouldNotReachHere(); 1727 } 1728 } 1729 move_regs(reg, dst->as_register()); 1730 } else { 1731 Register l_lo = left->as_register_lo(); 1732 if (right->is_constant()) { 1733 __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong()); 1734 switch (code) { 1735 case lir_logic_and: 1736 __ z_ngr(l_lo, Z_R1_scratch); 1737 break; 1738 case lir_logic_or: 1739 __ z_ogr(l_lo, Z_R1_scratch); 1740 break; 1741 case lir_logic_xor: 1742 __ z_xgr(l_lo, Z_R1_scratch); 1743 break; 1744 default: ShouldNotReachHere(); 1745 } 1746 } else { 1747 Register r_lo; 1748 if (is_reference_type(right->type())) { 1749 r_lo = right->as_register(); 1750 } else { 1751 r_lo = right->as_register_lo(); 1752 } 1753 switch (code) { 1754 case lir_logic_and: 1755 __ z_ngr(l_lo, r_lo); 1756 break; 1757 case lir_logic_or: 1758 __ z_ogr(l_lo, r_lo); 1759 break; 1760 case lir_logic_xor: 1761 __ z_xgr(l_lo, r_lo); 1762 break; 1763 default: ShouldNotReachHere(); 1764 } 1765 } 1766 1767 Register dst_lo = dst->as_register_lo(); 1768 1769 move_regs(l_lo, dst_lo); 1770 } 1771 } 1772 1773 // See operand selection in LIRGenerator::do_ArithmeticOp_Int(). 1774 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 1775 if (left->is_double_cpu()) { 1776 // 64 bit integer case 1777 assert(left->is_double_cpu(), "left must be register"); 1778 assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()), 1779 "right must be register or power of 2 constant"); 1780 assert(result->is_double_cpu(), "result must be register"); 1781 1782 Register lreg = left->as_register_lo(); 1783 Register dreg = result->as_register_lo(); 1784 1785 if (right->is_constant()) { 1786 // Convert division by a power of two into some shifts and logical operations. 1787 Register treg1 = Z_R0_scratch; 1788 Register treg2 = Z_R1_scratch; 1789 jlong divisor = right->as_jlong(); 1790 jlong log_divisor = log2i_exact(right->as_jlong()); 1791 1792 if (divisor == min_jlong) { 1793 // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1. 1794 if (dreg == lreg) { 1795 NearLabel done; 1796 __ load_const_optimized(treg2, min_jlong); 1797 __ z_cgr(lreg, treg2); 1798 __ z_lghi(dreg, 0); // Preserves condition code. 
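// The condition code still holds the result of the z_cgr above (z_lghi leaves it untouched):
// if lreg != min_jlong the result stays 0, otherwise fall through and set it to 1.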
1799 __ z_brne(done); 1800 __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1 1801 __ bind(done); 1802 } else { 1803 assert_different_registers(dreg, lreg); 1804 NearLabel done; 1805 __ z_lghi(dreg, 0); 1806 __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done); 1807 __ z_lghi(dreg, 1); 1808 __ bind(done); 1809 } 1810 return; 1811 } 1812 __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG); 1813 if (divisor == 2) { 1814 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1815 } else { 1816 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1817 __ and_imm(treg2, divisor - 1, treg1, true); 1818 } 1819 if (code == lir_idiv) { 1820 __ z_agr(dreg, treg2); 1821 __ z_srag(dreg, dreg, log_divisor); 1822 } else { 1823 assert(code == lir_irem, "check"); 1824 __ z_agr(treg2, dreg); 1825 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1826 __ z_sgr(dreg, treg2); 1827 } 1828 return; 1829 } 1830 1831 // Divisor is not a power of 2 constant. 1832 Register rreg = right->as_register_lo(); 1833 Register treg = temp->as_register_lo(); 1834 assert(right->is_double_cpu(), "right must be register"); 1835 assert(lreg == Z_R11, "see ldivInOpr()"); 1836 assert(rreg != lreg, "right register must not be same as left register"); 1837 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) || 1838 (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()"); 1839 1840 Register R1 = lreg->predecessor(); 1841 Register R2 = rreg; 1842 assert(code != lir_idiv || lreg==dreg, "see code below"); 1843 if (code == lir_idiv) { 1844 __ z_lcgr(lreg, lreg); 1845 } else { 1846 __ clear_reg(dreg, true, false); 1847 } 1848 NearLabel done; 1849 __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done); 1850 if (code == lir_idiv) { 1851 __ z_lcgr(lreg, lreg); // Revert lcgr above. 1852 } 1853 if (ImplicitDiv0Checks) { 1854 // No debug info because the idiv won't trap. 1855 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1856 // which is unnecessary, too. 1857 add_debug_info_for_div0(__ offset(), info); 1858 } 1859 __ z_dsgr(R1, R2); 1860 __ bind(done); 1861 return; 1862 } 1863 1864 // 32 bit integer case 1865 1866 assert(left->is_single_cpu(), "left must be register"); 1867 assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant"); 1868 assert(result->is_single_cpu(), "result must be register"); 1869 1870 Register lreg = left->as_register(); 1871 Register dreg = result->as_register(); 1872 1873 if (right->is_constant()) { 1874 // Convert division by a power of two into some shifts and logical operations. 1875 Register treg1 = Z_R0_scratch; 1876 Register treg2 = Z_R1_scratch; 1877 jlong divisor = right->as_jint(); 1878 jlong log_divisor = log2i_exact(right->as_jint()); 1879 __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend 1880 if (divisor == 2) { 1881 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1882 } else { 1883 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1884 __ and_imm(treg2, divisor - 1, treg1, true); 1885 } 1886 if (code == lir_idiv) { 1887 __ z_agr(dreg, treg2); 1888 __ z_srag(dreg, dreg, log_divisor); 1889 } else { 1890 assert(code == lir_irem, "check"); 1891 __ z_agr(treg2, dreg); 1892 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1893 __ z_sgr(dreg, treg2); 1894 } 1895 return; 1896 } 1897 1898 // Divisor is not a power of 2 constant. 
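// The general case divides with DSGFR, which uses an even/odd register pair: the dividend sits in the
// odd register (Z_R11), the remainder ends up in the even register (Z_R10) and the quotient in the odd
// one; the asserts below pin the operands accordingly.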
1899 Register rreg = right->as_register(); 1900 Register treg = temp->as_register(); 1901 assert(right->is_single_cpu(), "right must be register"); 1902 assert(lreg == Z_R11, "see divInOpr()"); 1903 assert(rreg != lreg, "right register must not be same as left register"); 1904 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) 1905 || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()"); 1906 1907 Register R1 = lreg->predecessor(); 1908 Register R2 = rreg; 1909 __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend 1910 if (ImplicitDiv0Checks) { 1911 // No debug info because the idiv won't trap. 1912 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1913 // which is unnecessary, too. 1914 add_debug_info_for_div0(__ offset(), info); 1915 } 1916 __ z_dsgfr(R1, R2); 1917 } 1918 1919 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1920 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match"); 1921 assert(exceptionPC->as_register() == Z_EXC_PC, "should match"); 1922 1923 // Exception object is not added to oop map by LinearScan 1924 // (LinearScan assumes that no oops are in fixed registers). 1925 info->add_register_oop(exceptionOop); 1926 1927 // Reuse the debug info from the safepoint poll for the throw op itself. 1928 __ get_PC(Z_EXC_PC); 1929 add_call_info(__ offset(), info); // for exception handler 1930 address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? StubId::c1_handle_exception_id 1931 : StubId::c1_handle_exception_nofpu_id); 1932 emit_call_c(stub); 1933 } 1934 1935 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1936 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match"); 1937 1938 __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry); 1939 } 1940 1941 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 1942 ciArrayKlass* default_type = op->expected_type(); 1943 Register src = op->src()->as_register(); 1944 Register dst = op->dst()->as_register(); 1945 Register src_pos = op->src_pos()->as_register(); 1946 Register dst_pos = op->dst_pos()->as_register(); 1947 Register length = op->length()->as_register(); 1948 Register tmp = op->tmp()->as_register(); 1949 1950 CodeStub* stub = op->stub(); 1951 int flags = op->flags(); 1952 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 1953 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 1954 1955 // If we don't know anything, just go through the generic arraycopy. 1956 if (default_type == nullptr) { 1957 address copyfunc_addr = StubRoutines::generic_arraycopy(); 1958 1959 if (copyfunc_addr == nullptr) { 1960 // Take a slow path for generic arraycopy. 1961 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 1962 __ bind(*stub->continuation()); 1963 return; 1964 } 1965 1966 // Save outgoing arguments in callee saved registers (C convention) in case 1967 // a call to System.arraycopy is needed. 1968 Register callee_saved_src = Z_R10; 1969 Register callee_saved_src_pos = Z_R11; 1970 Register callee_saved_dst = Z_R12; 1971 Register callee_saved_dst_pos = Z_R13; 1972 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.
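// The generic arraycopy stub returns 0 on success, or the bitwise complement of the number of elements
// that were already copied if it could only copy a prefix; the code after the call below decodes that
// value to advance src_pos/dst_pos and shrink length before branching to the slow path.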
1973 1974 __ lgr_if_needed(callee_saved_src, src); 1975 __ lgr_if_needed(callee_saved_src_pos, src_pos); 1976 __ lgr_if_needed(callee_saved_dst, dst); 1977 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 1978 __ lgr_if_needed(callee_saved_length, length); 1979 1980 // C function requires 64 bit values. 1981 __ z_lgfr(src_pos, src_pos); 1982 __ z_lgfr(dst_pos, dst_pos); 1983 __ z_lgfr(length, length); 1984 1985 // Pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint. 1986 1987 // The arguments are in the corresponding registers. 1988 assert(Z_ARG1 == src, "assumption"); 1989 assert(Z_ARG2 == src_pos, "assumption"); 1990 assert(Z_ARG3 == dst, "assumption"); 1991 assert(Z_ARG4 == dst_pos, "assumption"); 1992 assert(Z_ARG5 == length, "assumption"); 1993 #ifndef PRODUCT 1994 if (PrintC1Statistics) { 1995 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); 1996 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 1997 } 1998 #endif 1999 emit_call_c(copyfunc_addr); 2000 CHECK_BAILOUT(); 2001 2002 __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2003 2004 __ z_lgr(tmp, Z_RET); 2005 __ z_xilf(tmp, -1); 2006 2007 // Restore values from callee saved registers so they are where the stub 2008 // expects them. 2009 __ lgr_if_needed(src, callee_saved_src); 2010 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2011 __ lgr_if_needed(dst, callee_saved_dst); 2012 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2013 __ lgr_if_needed(length, callee_saved_length); 2014 2015 __ z_sr(length, tmp); 2016 __ z_ar(src_pos, tmp); 2017 __ z_ar(dst_pos, tmp); 2018 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2019 2020 __ bind(*stub->continuation()); 2021 return; 2022 } 2023 2024 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2025 2026 int elem_size = type2aelembytes(basic_type); 2027 int shift_amount; 2028 2029 switch (elem_size) { 2030 case 1 : 2031 shift_amount = 0; 2032 break; 2033 case 2 : 2034 shift_amount = 1; 2035 break; 2036 case 4 : 2037 shift_amount = 2; 2038 break; 2039 case 8 : 2040 shift_amount = 3; 2041 break; 2042 default: 2043 shift_amount = -1; 2044 ShouldNotReachHere(); 2045 } 2046 2047 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2048 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2049 2050 // Length and positions are all sign extended at this point on 64 bit. 2051 2052 // Test for null. 2053 if (flags & LIR_OpArrayCopy::src_null_check) { 2054 __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2055 } 2056 if (flags & LIR_OpArrayCopy::dst_null_check) { 2057 __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2058 } 2059 2060 // Check if the positions are negative. 2061 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2062 __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2063 } 2064 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2065 __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2066 } 2067 2068 // If the compiler was not able to prove that the exact type of the source or the destination 2069 // of the arraycopy is an array type, check at runtime whether the source or the destination is 2070 // an instance type.
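// Array klasses have a negative layout_helper while instance klasses have a non-negative one
// (Klass::_lh_neutral_value == 0 is the boundary), so a load-and-test followed by a
// branch-on-not-negative routes non-array operands to the slow path.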
2071 if (flags & LIR_OpArrayCopy::type_check) { 2072 assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions"); 2073 2074 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2075 __ load_klass(tmp, dst); 2076 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2077 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2078 } 2079 2080 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2081 __ load_klass(tmp, src); 2082 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2083 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2084 } 2085 } 2086 2087 if (flags & LIR_OpArrayCopy::src_range_check) { 2088 __ z_la(tmp, Address(src_pos, length)); 2089 __ z_cl(tmp, src_length_addr); 2090 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2091 } 2092 if (flags & LIR_OpArrayCopy::dst_range_check) { 2093 __ z_la(tmp, Address(dst_pos, length)); 2094 __ z_cl(tmp, dst_length_addr); 2095 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2096 } 2097 2098 if (flags & LIR_OpArrayCopy::length_positive_check) { 2099 __ z_ltr(length, length); 2100 __ branch_optimized(Assembler::bcondNegative, *stub->entry()); 2101 } 2102 2103 // Stubs require 64 bit values. 2104 __ z_lgfr(src_pos, src_pos); // int -> long 2105 __ z_lgfr(dst_pos, dst_pos); // int -> long 2106 __ z_lgfr(length, length); // int -> long 2107 2108 if (flags & LIR_OpArrayCopy::type_check) { 2109 // We don't know the array types are compatible. 2110 if (basic_type != T_OBJECT) { 2111 // Simple test for basic type arrays. 2112 __ cmp_klasses_from_objects(src, dst, tmp, Z_R1_scratch); 2113 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2114 } else { 2115 // For object arrays, if src is a sub class of dst then we can 2116 // safely do the copy. 2117 NearLabel cont, slow; 2118 Register src_klass = Z_R1_scratch; 2119 Register dst_klass = Z_R10; 2120 2121 __ load_klass(src_klass, src); 2122 __ load_klass(dst_klass, dst); 2123 2124 __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr); 2125 2126 store_parameter(src_klass, 0); // sub 2127 store_parameter(dst_klass, 1); // super 2128 emit_call_c(Runtime1::entry_for (StubId::c1_slow_subtype_check_id)); 2129 CHECK_BAILOUT2(cont, slow); 2130 // Sets condition code 0 for match (2 otherwise). 2131 __ branch_optimized(Assembler::bcondEqual, cont); 2132 2133 __ bind(slow); 2134 2135 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2136 if (copyfunc_addr != nullptr) { // use stub if available 2137 // Src is not a sub class of dst so we have to do a 2138 // per-element check. 2139 2140 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2141 if ((flags & mask) != mask) { 2142 // One of them is known to be an object array (see the assert below); check at runtime 2143 // that the other one is an object array as well. assert(flags & mask, "one of the two should be known to be an object array"); 2144 2145 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2146 __ load_klass(tmp, src); 2147 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2148 __ load_klass(tmp, dst); 2149 } 2150 Address klass_lh_addr(tmp, Klass::layout_helper_offset()); 2151 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2152 __ load_const_optimized(Z_R1_scratch, objArray_lh); 2153 __ z_c(Z_R1_scratch, klass_lh_addr); 2154 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2155 } 2156 2157 // Save outgoing arguments in callee saved registers (C convention) in case 2158 // a call to System.arraycopy is needed.
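// The checkcast stub expects the source and destination element addresses and the element count in
// Z_ARG1..Z_ARG3, plus the destination element klass (Z_ARG5) and its super_check_offset (Z_ARG4)
// for the per-element subtype check; they are set up below.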
2159 Register callee_saved_src = Z_R10; 2160 Register callee_saved_src_pos = Z_R11; 2161 Register callee_saved_dst = Z_R12; 2162 Register callee_saved_dst_pos = Z_R13; 2163 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved. 2164 2165 __ lgr_if_needed(callee_saved_src, src); 2166 __ lgr_if_needed(callee_saved_src_pos, src_pos); 2167 __ lgr_if_needed(callee_saved_dst, dst); 2168 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 2169 __ lgr_if_needed(callee_saved_length, length); 2170 2171 __ z_llgfr(length, length); // Higher 32bits must be null. 2172 2173 __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset 2174 __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset 2175 2176 __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type))); 2177 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2178 __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type))); 2179 assert_different_registers(Z_ARG2, dst, length); 2180 2181 __ z_lgr(Z_ARG3, length); 2182 assert_different_registers(Z_ARG3, dst); 2183 2184 __ load_klass(Z_ARG5, dst); 2185 __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset())); 2186 __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset())); 2187 emit_call_c(copyfunc_addr); 2188 CHECK_BAILOUT2(cont, slow); 2189 2190 #ifndef PRODUCT 2191 if (PrintC1Statistics) { 2192 NearLabel failed; 2193 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed); 2194 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt); 2195 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2196 __ bind(failed); 2197 } 2198 #endif 2199 2200 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2201 2202 #ifndef PRODUCT 2203 if (PrintC1Statistics) { 2204 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt); 2205 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2206 } 2207 #endif 2208 2209 __ z_lgr(tmp, Z_RET); 2210 __ z_xilf(tmp, -1); 2211 2212 // Restore previously spilled arguments 2213 __ lgr_if_needed(src, callee_saved_src); 2214 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2215 __ lgr_if_needed(dst, callee_saved_dst); 2216 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2217 __ lgr_if_needed(length, callee_saved_length); 2218 2219 __ z_sr(length, tmp); 2220 __ z_ar(src_pos, tmp); 2221 __ z_ar(dst_pos, tmp); 2222 } 2223 2224 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2225 2226 __ bind(cont); 2227 } 2228 } 2229 2230 #ifdef ASSERT 2231 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2232 // Sanity check the known type with the incoming class. For the 2233 // primitive case the types must match exactly with src.klass and 2234 // dst.klass each exactly matching the default type. For the 2235 // object array case, if no type check is needed then either the 2236 // dst type is exactly the expected type and the src type is a 2237 // subtype which we can't check or src is the same array as dst 2238 // but not necessarily exactly of type default_type. 
2239 NearLabel known_ok, halt; 2240 metadata2reg(default_type->constant_encoding(), tmp); 2241 if (UseCompressedClassPointers) { 2242 __ encode_klass_not_null(tmp); 2243 } 2244 2245 if (basic_type != T_OBJECT) { 2246 __ cmp_klass(tmp, dst, Z_R1_scratch); 2247 __ branch_optimized(Assembler::bcondNotEqual, halt); 2248 2249 __ cmp_klass(tmp, src, Z_R1_scratch); 2250 __ branch_optimized(Assembler::bcondEqual, known_ok); 2251 } else { 2252 __ cmp_klass(tmp, dst, Z_R1_scratch); 2253 __ branch_optimized(Assembler::bcondEqual, known_ok); 2254 __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok); 2255 } 2256 __ bind(halt); 2257 __ stop("incorrect type information in arraycopy"); 2258 __ bind(known_ok); 2259 } 2260 #endif 2261 2262 #ifndef PRODUCT 2263 if (PrintC1Statistics) { 2264 __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type)); 2265 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2266 } 2267 #endif 2268 2269 __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset 2270 __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset 2271 2272 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2273 __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type))); 2274 assert_different_registers(Z_ARG2, length); 2275 __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type))); 2276 __ lgr_if_needed(Z_ARG3, length); 2277 2278 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2279 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2280 const char *name; 2281 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2282 __ call_VM_leaf(entry); 2283 2284 if (stub != nullptr) { 2285 __ bind(*stub->continuation()); 2286 } 2287 } 2288 2289 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2290 if (dest->is_single_cpu()) { 2291 if (left->type() == T_OBJECT) { 2292 switch (code) { 2293 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2294 case lir_shr: __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2295 case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2296 default: ShouldNotReachHere(); 2297 } 2298 } else { 2299 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2300 Register masked_count = Z_R1_scratch; 2301 __ z_lr(masked_count, count->as_register()); 2302 __ z_nill(masked_count, 31); 2303 switch (code) { 2304 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break; 2305 case lir_shr: __ z_sra (dest->as_register(), 0, masked_count); break; 2306 case lir_ushr: __ z_srl (dest->as_register(), 0, masked_count); break; 2307 default: ShouldNotReachHere(); 2308 } 2309 } 2310 } else { 2311 switch (code) { 2312 case lir_shl: __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2313 case lir_shr: __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2314 case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2315 default: ShouldNotReachHere(); 2316 } 2317 } 2318 } 2319 2320 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2321 if (left->type() == T_OBJECT) { 2322 count = count & 63; // 
Shouldn't shift by more than sizeof(intptr_t). 2323 Register l = left->as_register(); 2324 Register d = dest->as_register_lo(); 2325 switch (code) { 2326 case lir_shl: __ z_sllg (d, l, count); break; 2327 case lir_shr: __ z_srag (d, l, count); break; 2328 case lir_ushr: __ z_srlg (d, l, count); break; 2329 default: ShouldNotReachHere(); 2330 } 2331 return; 2332 } 2333 if (dest->is_single_cpu()) { 2334 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2335 count = count & 0x1F; // Java spec 2336 switch (code) { 2337 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), count); break; 2338 case lir_shr: __ z_sra (dest->as_register(), count); break; 2339 case lir_ushr: __ z_srl (dest->as_register(), count); break; 2340 default: ShouldNotReachHere(); 2341 } 2342 } else if (dest->is_double_cpu()) { 2343 count = count & 63; // Java spec 2344 Register l = left->as_pointer_register(); 2345 Register d = dest->as_pointer_register(); 2346 switch (code) { 2347 case lir_shl: __ z_sllg (d, l, count); break; 2348 case lir_shr: __ z_srag (d, l, count); break; 2349 case lir_ushr: __ z_srlg (d, l, count); break; 2350 default: ShouldNotReachHere(); 2351 } 2352 } else { 2353 ShouldNotReachHere(); 2354 } 2355 } 2356 2357 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2358 if (op->init_check()) { 2359 // Make sure klass is initialized & doesn't have finalizer. 2360 // init_state needs acquire, but S390 is TSO, and so we are already good. 2361 const int state_offset = in_bytes(InstanceKlass::init_state_offset()); 2362 Register iklass = op->klass()->as_register(); 2363 add_debug_info_for_null_check_here(op->stub()->info()); 2364 if (Immediate::is_uimm12(state_offset)) { 2365 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized); 2366 } else { 2367 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized); 2368 } 2369 __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far. 2370 } 2371 __ allocate_object(op->obj()->as_register(), 2372 op->tmp1()->as_register(), 2373 op->tmp2()->as_register(), 2374 op->header_size(), 2375 op->object_size(), 2376 op->klass()->as_register(), 2377 *op->stub()->entry()); 2378 __ bind(*op->stub()->continuation()); 2379 __ verify_oop(op->obj()->as_register(), FILE_AND_LINE); 2380 } 2381 2382 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2383 Register len = op->len()->as_register(); 2384 __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend 2385 2386 if (UseSlowPath || 2387 (!UseFastNewObjectArray && (is_reference_type(op->type()))) || 2388 (!UseFastNewTypeArray && (!is_reference_type(op->type())))) { 2389 __ z_brul(*op->stub()->entry()); 2390 } else { 2391 __ allocate_array(op->obj()->as_register(), 2392 op->len()->as_register(), 2393 op->tmp1()->as_register(), 2394 op->tmp2()->as_register(), 2395 arrayOopDesc::base_offset_in_bytes(op->type()), 2396 type2aelembytes(op->type()), 2397 op->klass()->as_register(), 2398 *op->stub()->entry(), 2399 op->zero_array()); 2400 } 2401 __ bind(*op->stub()->continuation()); 2402 } 2403 2404 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, 2405 Register recv, Register tmp1, Label* update_done) { 2406 uint i; 2407 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2408 Label next_test; 2409 // See if the receiver is receiver[n]. 
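// Each profiled row holds a (receiver klass, count) pair; on a match, bump that row's counter
// and branch to update_done.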
2410 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2411 __ z_cg(recv, receiver_addr); 2412 __ z_brne(next_test); 2413 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 2414 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2415 __ branch_optimized(Assembler::bcondAlways, *update_done); 2416 __ bind(next_test); 2417 } 2418 2419 // Didn't find receiver; find next empty slot and fill it in. 2420 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2421 Label next_test; 2422 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2423 __ z_ltg(Z_R0_scratch, recv_addr); 2424 __ z_brne(next_test); 2425 __ z_stg(recv, recv_addr); 2426 __ load_const_optimized(tmp1, DataLayout::counter_increment); 2427 __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo); 2428 __ branch_optimized(Assembler::bcondAlways, *update_done); 2429 __ bind(next_test); 2430 } 2431 } 2432 2433 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2434 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2435 Unimplemented(); 2436 } 2437 2438 void LIR_Assembler::store_parameter(Register r, int param_num) { 2439 assert(param_num >= 0, "invalid num"); 2440 int offset_in_bytes = param_num * BytesPerWord; 2441 check_reserved_argument_area(offset_in_bytes); 2442 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2443 __ z_stg(r, offset_in_bytes, Z_SP); 2444 } 2445 2446 void LIR_Assembler::store_parameter(jint c, int param_num) { 2447 assert(param_num >= 0, "invalid num"); 2448 int offset_in_bytes = param_num * BytesPerWord; 2449 check_reserved_argument_area(offset_in_bytes); 2450 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2451 __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true); 2452 } 2453 2454 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2455 // We always need a stub for the failure case. 2456 CodeStub* stub = op->stub(); 2457 Register obj = op->object()->as_register(); 2458 Register k_RInfo = op->tmp1()->as_register(); 2459 Register klass_RInfo = op->tmp2()->as_register(); 2460 Register dst = op->result_opr()->as_register(); 2461 Register Rtmp1 = Z_R1_scratch; 2462 ciKlass* k = op->klass(); 2463 2464 assert(!op->tmp3()->is_valid(), "tmp3's not needed"); 2465 2466 // Check if it needs to be profiled. 2467 ciMethodData* md = nullptr; 2468 ciProfileData* data = nullptr; 2469 2470 if (op->should_profile()) { 2471 ciMethod* method = op->profiled_method(); 2472 assert(method != nullptr, "Should have method"); 2473 int bci = op->profiled_bci(); 2474 md = method->method_data_or_null(); 2475 assert(md != nullptr, "Sanity"); 2476 data = md->bci_to_data(bci); 2477 assert(data != nullptr, "need data for type check"); 2478 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2479 } 2480 2481 // Temp operands do not overlap with inputs, if this is their last 2482 // use (end of range is exclusive), so a register conflict is possible. 
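// If obj happens to share a register with one of the temps, redirect that temp to the result
// register so that obj itself is not clobbered.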
2483 if (obj == k_RInfo) { 2484 k_RInfo = dst; 2485 } else if (obj == klass_RInfo) { 2486 klass_RInfo = dst; 2487 } 2488 assert_different_registers(obj, k_RInfo, klass_RInfo); 2489 2490 if (op->should_profile()) { 2491 Register mdo = klass_RInfo; 2492 metadata2reg(md->constant_encoding(), mdo); 2493 NearLabel not_null; 2494 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2495 // Object is null; update MDO and exit. 2496 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2497 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2498 __ or2mem_8(data_addr, header_bits); 2499 __ branch_optimized(Assembler::bcondAlways, *obj_is_null); 2500 __ bind(not_null); 2501 2502 NearLabel update_done; 2503 Register recv = k_RInfo; 2504 __ load_klass(recv, obj); 2505 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2506 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2507 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2508 __ bind(update_done); 2509 } else { 2510 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null); 2511 } 2512 2513 Label *failure_target = failure; 2514 Label *success_target = success; 2515 2516 // Patching may screw with our temporaries, 2517 // so let's do it before loading the class. 2518 if (k->is_loaded()) { 2519 metadata2reg(k->constant_encoding(), k_RInfo); 2520 } else { 2521 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2522 } 2523 assert(obj != k_RInfo, "must be different"); 2524 2525 __ verify_oop(obj, FILE_AND_LINE); 2526 2527 // Get object class. 2528 // Not a safepoint as obj null check happens earlier. 2529 if (op->fast_check()) { 2530 if (UseCompressedClassPointers) { 2531 __ load_klass(klass_RInfo, obj); 2532 __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); 2533 } else { 2534 __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 2535 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2536 } 2537 // Successful cast, fall through to profile or jump. 2538 } else { 2539 bool need_slow_path = !k->is_loaded() || 2540 ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset())); 2541 __ load_klass(klass_RInfo, obj); 2542 // Perform the fast part of the checking logic. 2543 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, 2544 (need_slow_path ? success_target : nullptr), 2545 failure_target, nullptr); 2546 if (need_slow_path) { 2547 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2548 address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id); 2549 store_parameter(klass_RInfo, 0); // sub 2550 store_parameter(k_RInfo, 1); // super 2551 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2552 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2553 // Fall through to success case. 
2554 } 2555 } 2556 2557 __ branch_optimized(Assembler::bcondAlways, *success); 2558 } 2559 2560 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2561 LIR_Code code = op->code(); 2562 if (code == lir_store_check) { 2563 Register value = op->object()->as_register(); 2564 Register array = op->array()->as_register(); 2565 Register k_RInfo = op->tmp1()->as_register(); 2566 Register klass_RInfo = op->tmp2()->as_register(); 2567 Register Rtmp1 = Z_R1_scratch; 2568 2569 CodeStub* stub = op->stub(); 2570 2571 // Check if it needs to be profiled. 2572 ciMethodData* md = nullptr; 2573 ciProfileData* data = nullptr; 2574 2575 assert_different_registers(value, k_RInfo, klass_RInfo); 2576 2577 if (op->should_profile()) { 2578 ciMethod* method = op->profiled_method(); 2579 assert(method != nullptr, "Should have method"); 2580 int bci = op->profiled_bci(); 2581 md = method->method_data_or_null(); 2582 assert(md != nullptr, "Sanity"); 2583 data = md->bci_to_data(bci); 2584 assert(data != nullptr, "need data for type check"); 2585 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2586 } 2587 NearLabel done; 2588 Label *success_target = &done; 2589 Label *failure_target = stub->entry(); 2590 2591 if (op->should_profile()) { 2592 Register mdo = klass_RInfo; 2593 metadata2reg(md->constant_encoding(), mdo); 2594 NearLabel not_null; 2595 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2596 // Object is null; update MDO and exit. 2597 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2598 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2599 __ or2mem_8(data_addr, header_bits); 2600 __ branch_optimized(Assembler::bcondAlways, done); 2601 __ bind(not_null); 2602 2603 NearLabel update_done; 2604 Register recv = k_RInfo; 2605 __ load_klass(recv, value); 2606 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2607 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2608 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2609 __ bind(update_done); 2610 } else { 2611 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done); 2612 } 2613 2614 add_debug_info_for_null_check_here(op->info_for_exception()); 2615 __ load_klass(k_RInfo, array); 2616 __ load_klass(klass_RInfo, value); 2617 2618 // Get instance klass (it's already uncompressed). 2619 __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 2620 // Perform the fast part of the checking logic. 2621 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 2622 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2623 address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id); 2624 store_parameter(klass_RInfo, 0); // sub 2625 store_parameter(k_RInfo, 1); // super 2626 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2627 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2628 // Fall through to success case. 
2629 2630 __ bind(done); 2631 } else { 2632 if (code == lir_checkcast) { 2633 Register obj = op->object()->as_register(); 2634 Register dst = op->result_opr()->as_register(); 2635 NearLabel success; 2636 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2637 __ bind(success); 2638 __ lgr_if_needed(dst, obj); 2639 } else { 2640 if (code == lir_instanceof) { 2641 Register obj = op->object()->as_register(); 2642 Register dst = op->result_opr()->as_register(); 2643 NearLabel success, failure, done; 2644 emit_typecheck_helper(op, &success, &failure, &failure); 2645 __ bind(failure); 2646 __ clear_reg(dst); 2647 __ branch_optimized(Assembler::bcondAlways, done); 2648 __ bind(success); 2649 __ load_const_optimized(dst, 1); 2650 __ bind(done); 2651 } else { 2652 ShouldNotReachHere(); 2653 } 2654 } 2655 } 2656 } 2657 2658 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2659 Register addr = op->addr()->as_pointer_register(); 2660 Register t1_cmp = Z_R1_scratch; 2661 if (op->code() == lir_cas_long) { 2662 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2663 Register new_value_lo = op->new_value()->as_register_lo(); 2664 __ z_lgr(t1_cmp, cmp_value_lo); 2665 // Perform the compare and swap operation. 2666 __ z_csg(t1_cmp, new_value_lo, 0, addr); 2667 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2668 Register cmp_value = op->cmp_value()->as_register(); 2669 Register new_value = op->new_value()->as_register(); 2670 if (op->code() == lir_cas_obj) { 2671 if (UseCompressedOops) { 2672 t1_cmp = op->tmp1()->as_register(); 2673 Register t2_new = op->tmp2()->as_register(); 2674 assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new); 2675 __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/); 2676 __ oop_encoder(t2_new, new_value, true /*maybe null*/); 2677 __ z_cs(t1_cmp, t2_new, 0, addr); 2678 } else { 2679 __ z_lgr(t1_cmp, cmp_value); 2680 __ z_csg(t1_cmp, new_value, 0, addr); 2681 } 2682 } else { 2683 __ z_lr(t1_cmp, cmp_value); 2684 __ z_cs(t1_cmp, new_value, 0, addr); 2685 } 2686 } else { 2687 ShouldNotReachHere(); // new lir_cas_?? 2688 } 2689 } 2690 2691 void LIR_Assembler::breakpoint() { 2692 Unimplemented(); 2693 // __ breakpoint_trap(); 2694 } 2695 2696 void LIR_Assembler::push(LIR_Opr opr) { 2697 ShouldNotCallThis(); // unused 2698 } 2699 2700 void LIR_Assembler::pop(LIR_Opr opr) { 2701 ShouldNotCallThis(); // unused 2702 } 2703 2704 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2705 Address addr = frame_map()->address_for_monitor_lock(monitor_no); 2706 __ add2reg(dst_opr->as_register(), addr.disp(), addr.base()); 2707 } 2708 2709 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2710 Register obj = op->obj_opr()->as_register(); // May not be an oop. 2711 Register hdr = op->hdr_opr()->as_register(); 2712 Register lock = op->lock_opr()->as_register(); 2713 if (op->code() == lir_lock) { 2714 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2715 // Add debug info for NullPointerException only if one is possible. 
2716 if (op->info() != nullptr) { 2717 add_debug_info_for_null_check_here(op->info()); 2718 } 2719 __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2720 // done 2721 } else if (op->code() == lir_unlock) { 2722 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2723 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2724 } else { 2725 ShouldNotReachHere(); 2726 } 2727 __ bind(*op->stub()->continuation()); 2728 } 2729 2730 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2731 Register obj = op->obj()->as_pointer_register(); 2732 Register result = op->result_opr()->as_pointer_register(); 2733 2734 CodeEmitInfo* info = op->info(); 2735 if (info != nullptr) { 2736 add_debug_info_for_null_check_here(info); 2737 } 2738 2739 __ load_klass(result, obj); 2740 } 2741 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2742 ciMethod* method = op->profiled_method(); 2743 int bci = op->profiled_bci(); 2744 ciMethod* callee = op->profiled_callee(); 2745 2746 // Update counter for all call types. 2747 ciMethodData* md = method->method_data_or_null(); 2748 assert(md != nullptr, "Sanity"); 2749 ciProfileData* data = md->bci_to_data(bci); 2750 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2751 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2752 Register mdo = op->mdo()->as_register(); 2753 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2754 Register tmp1 = op->tmp1()->as_register_lo(); 2755 metadata2reg(md->constant_encoding(), mdo); 2756 2757 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2758 // Perform additional virtual call profiling for invokevirtual and 2759 // invokeinterface bytecodes 2760 if (op->should_profile_receiver_type()) { 2761 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2762 Register recv = op->recv()->as_register(); 2763 assert_different_registers(mdo, tmp1, recv); 2764 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2765 ciKlass* known_klass = op->known_holder(); 2766 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2767 // We know the type that will be seen at this call site; we can 2768 // statically update the MethodData* rather than needing to do 2769 // dynamic tests on the receiver type. 2770 2771 // NOTE: we should probably put a lock around this search to 2772 // avoid collisions by concurrent compilations. 2773 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2774 uint i; 2775 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2776 ciKlass* receiver = vc_data->receiver(i); 2777 if (known_klass->equals(receiver)) { 2778 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2779 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2780 return; 2781 } 2782 } 2783 2784 // Receiver type not found in profile data. Select an empty slot. 2785 2786 // Note that this is less efficient than it should be because it 2787 // always does a write to the receiver part of the 2788 // VirtualCallData rather than just the first time. 
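// Claim the first empty row: record the known receiver klass and start its counter with a
// single increment.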
2789 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2790 ciKlass* receiver = vc_data->receiver(i); 2791 if (receiver == nullptr) { 2792 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2793 metadata2reg(known_klass->constant_encoding(), tmp1); 2794 __ z_stg(tmp1, recv_addr); 2795 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2796 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2797 return; 2798 } 2799 } 2800 } else { 2801 __ load_klass(recv, recv); 2802 NearLabel update_done; 2803 type_profile_helper(mdo, md, data, recv, tmp1, &update_done); 2804 // Receiver did not match any saved receiver and there is no empty row for it. 2805 // Increment total counter to indicate polymorphic case. 2806 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2807 __ bind(update_done); 2808 } 2809 } else { 2810 // static call 2811 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2812 } 2813 } 2814 2815 void LIR_Assembler::align_backward_branch_target() { 2816 __ align(OptoLoopAlignment); 2817 } 2818 2819 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2820 ShouldNotCallThis(); // There are no delay slots on ZARCH_64. 2821 } 2822 2823 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2824 // tmp must be unused 2825 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2826 assert(left->is_register(), "can only handle registers"); 2827 2828 if (left->is_single_cpu()) { 2829 __ z_lcr(dest->as_register(), left->as_register()); 2830 } else if (left->is_single_fpu()) { 2831 __ z_lcebr(dest->as_float_reg(), left->as_float_reg()); 2832 } else if (left->is_double_fpu()) { 2833 __ z_lcdbr(dest->as_double_reg(), left->as_double_reg()); 2834 } else { 2835 assert(left->is_double_cpu(), "Must be a long"); 2836 __ z_lcgr(dest->as_register_lo(), left->as_register_lo()); 2837 } 2838 } 2839 2840 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2841 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2842 assert(!tmp->is_valid(), "don't need temporary"); 2843 emit_call_c(dest); 2844 CHECK_BAILOUT(); 2845 if (info != nullptr) { 2846 add_call_info_here(info); 2847 } 2848 } 2849 2850 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2851 ShouldNotCallThis(); // not needed on ZARCH_64 2852 } 2853 2854 void LIR_Assembler::membar() { 2855 __ z_fence(); 2856 } 2857 2858 void LIR_Assembler::membar_acquire() { 2859 __ z_acquire(); 2860 } 2861 2862 void LIR_Assembler::membar_release() { 2863 __ z_release(); 2864 } 2865 2866 void LIR_Assembler::membar_loadload() { 2867 __ z_acquire(); 2868 } 2869 2870 void LIR_Assembler::membar_storestore() { 2871 __ z_release(); 2872 } 2873 2874 void LIR_Assembler::membar_loadstore() { 2875 __ z_acquire(); 2876 } 2877 2878 void LIR_Assembler::membar_storeload() { 2879 __ z_fence(); 2880 } 2881 2882 void LIR_Assembler::on_spin_wait() { 2883 Unimplemented(); 2884 } 2885 2886 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2887 assert(patch_code == lir_patch_none, "Patch code not supported"); 2888 LIR_Address* addr = addr_opr->as_address_ptr(); 2889 assert(addr->scale() == LIR_Address::times_1, "scaling unsupported"); 2890 __ load_address(dest->as_pointer_register(), as_Address(addr)); 2891 } 2892 2893 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2894 ShouldNotCallThis(); // unused 2895 } 2896 
2897 #ifdef ASSERT 2898 // Emit run-time assertion. 2899 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2900 Unimplemented(); 2901 } 2902 #endif 2903 2904 void LIR_Assembler::peephole(LIR_List*) { 2905 // Do nothing for now. 2906 } 2907 2908 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2909 assert(code == lir_xadd, "lir_xchg not supported"); 2910 Address src_addr = as_Address(src->as_address_ptr()); 2911 Register base = src_addr.base(); 2912 intptr_t disp = src_addr.disp(); 2913 if (src_addr.index()->is_valid()) { 2914 // LAA and LAAG do not support index register. 2915 __ load_address(Z_R1_scratch, src_addr); 2916 base = Z_R1_scratch; 2917 disp = 0; 2918 } 2919 if (data->type() == T_INT) { 2920 __ z_laa(dest->as_register(), data->as_register(), disp, base); 2921 } else if (data->type() == T_LONG) { 2922 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 2923 __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base); 2924 } else { 2925 ShouldNotReachHere(); 2926 } 2927 } 2928 2929 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2930 Register obj = op->obj()->as_register(); 2931 Register tmp1 = op->tmp()->as_pointer_register(); 2932 Register tmp2 = Z_R1_scratch; 2933 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2934 ciKlass* exact_klass = op->exact_klass(); 2935 intptr_t current_klass = op->current_klass(); 2936 bool not_null = op->not_null(); 2937 bool no_conflict = op->no_conflict(); 2938 2939 Label update, next, none, null_seen, init_klass; 2940 2941 bool do_null = !not_null; 2942 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2943 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2944 2945 assert(do_null || do_update, "why are we here?"); 2946 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2947 2948 __ verify_oop(obj, FILE_AND_LINE); 2949 2950 if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) { 2951 __ z_ltgr(tmp1, obj); 2952 } 2953 if (do_null) { 2954 __ z_brnz(update); 2955 if (!TypeEntries::was_null_seen(current_klass)) { 2956 __ z_lg(tmp1, mdo_addr); 2957 __ z_oill(tmp1, TypeEntries::null_seen); 2958 __ z_stg(tmp1, mdo_addr); 2959 } 2960 if (do_update) { 2961 __ z_bru(next); 2962 } 2963 } else { 2964 __ asm_assert(Assembler::bcondNotZero, "unexpected null obj", __LINE__); 2965 } 2966 2967 __ bind(update); 2968 2969 if (do_update) { 2970 #ifdef ASSERT 2971 if (exact_klass != nullptr) { 2972 __ load_klass(tmp1, tmp1); 2973 metadata2reg(exact_klass->constant_encoding(), tmp2); 2974 __ z_cgr(tmp1, tmp2); 2975 __ asm_assert(Assembler::bcondEqual, "exact klass and actual klass differ", __LINE__); 2976 } 2977 #endif 2978 2979 Label do_update; 2980 __ z_lg(tmp2, mdo_addr); 2981 2982 if (!no_conflict) { 2983 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 2984 if (exact_klass != nullptr) { 2985 metadata2reg(exact_klass->constant_encoding(), tmp1); 2986 } else { 2987 __ load_klass(tmp1, tmp1); 2988 } 2989 2990 // Klass seen before: nothing to do (regardless of unknown bit). 
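// Clear the low status bits in the copied MDO cell so that only the klass bits take part in
// the comparison.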
2991 __ z_lgr(Z_R0_scratch, tmp2); 2992 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 2993 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 2994 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 2995 2996 // Already unknown: Nothing to do anymore. 2997 __ z_tmll(tmp2, TypeEntries::type_unknown); 2998 __ z_brc(Assembler::bcondAllOne, next); 2999 3000 if (TypeEntries::is_type_none(current_klass)) { 3001 __ z_lgr(Z_R0_scratch, tmp2); 3002 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3003 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3004 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass); 3005 } 3006 } else { 3007 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3008 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3009 3010 // Already unknown: Nothing to do anymore. 3011 __ z_tmll(tmp2, TypeEntries::type_unknown); 3012 __ z_brc(Assembler::bcondAllOne, next); 3013 } 3014 3015 // Different than before. Cannot keep accurate profile. 3016 __ z_oill(tmp2, TypeEntries::type_unknown); 3017 __ z_bru(do_update); 3018 } else { 3019 // There's a single possible klass at this profile point. 3020 assert(exact_klass != nullptr, "should be"); 3021 if (TypeEntries::is_type_none(current_klass)) { 3022 metadata2reg(exact_klass->constant_encoding(), tmp1); 3023 __ z_lgr(Z_R0_scratch, tmp2); 3024 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3025 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3026 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3027 #ifdef ASSERT 3028 { 3029 Label ok; 3030 __ z_lgr(Z_R0_scratch, tmp2); 3031 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3032 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3033 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok); 3034 __ stop("unexpected profiling mismatch"); 3035 __ bind(ok); 3036 } 3037 #endif 3038 3039 } else { 3040 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3041 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3042 3043 // Already unknown: Nothing to do anymore. 3044 __ z_tmll(tmp2, TypeEntries::type_unknown); 3045 __ z_brc(Assembler::bcondAllOne, next); 3046 __ z_oill(tmp2, TypeEntries::type_unknown); 3047 __ z_bru(do_update); 3048 } 3049 } 3050 3051 __ bind(init_klass); 3052 // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 
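// tmp1 still holds the klass determined above; OR it into the cell, preserving a null_seen
// bit that may already be set in tmp2.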
3053 __ z_ogr(tmp2, tmp1); 3054 3055 __ bind(do_update); 3056 __ z_stg(tmp2, mdo_addr); 3057 3058 __ bind(next); 3059 } 3060 } 3061 3062 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 3063 Unimplemented(); 3064 } 3065 3066 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3067 assert(op->crc()->is_single_cpu(), "crc must be register"); 3068 assert(op->val()->is_single_cpu(), "byte value must be register"); 3069 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3070 Register crc = op->crc()->as_register(); 3071 Register val = op->val()->as_register(); 3072 Register res = op->result_opr()->as_register(); 3073 3074 assert_different_registers(val, crc, res); 3075 3076 __ load_const_optimized(res, StubRoutines::crc_table_addr()); 3077 __ kernel_crc32_singleByteReg(crc, val, res, true); 3078 __ z_lgfr(res, crc); 3079 } 3080 3081 #undef __