1 /* 2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_LIRAssembler.hpp" 28 #include "c1/c1_MacroAssembler.hpp" 29 #include "c1/c1_Runtime1.hpp" 30 #include "c1/c1_ValueStack.hpp" 31 #include "ci/ciArrayKlass.hpp" 32 #include "ci/ciInstance.hpp" 33 #include "gc_interface/collectedHeap.hpp" 34 #include "memory/barrierSet.hpp" 35 #include "memory/cardTableModRefBS.hpp" 36 #include "nativeInst_sparc.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 40 #define __ _masm-> 41 42 43 //------------------------------------------------------------ 44 45 46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 47 if (opr->is_constant()) { 48 LIR_Const* constant = opr->as_constant_ptr(); 49 switch (constant->type()) { 50 case T_INT: { 51 jint value = constant->as_jint(); 52 return Assembler::is_simm13(value); 53 } 54 55 default: 56 return false; 57 } 58 } 59 return false; 60 } 61 62 63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) { 64 switch (op->code()) { 65 case lir_null_check: 66 return true; 67 68 69 case lir_add: 70 case lir_ushr: 71 case lir_shr: 72 case lir_shl: 73 // integer shifts and adds are always one instruction 74 return op->result_opr()->is_single_cpu(); 75 76 77 case lir_move: { 78 LIR_Op1* op1 = op->as_Op1(); 79 LIR_Opr src = op1->in_opr(); 80 LIR_Opr dst = op1->result_opr(); 81 82 if (src == dst) { 83 NEEDS_CLEANUP; 84 // this works around a problem where moves with the same src and dst 85 // end up in the delay slot and then the assembler swallows the mov 86 // since it has no effect and then it complains because the delay slot 87 // is empty. returning false stops the optimizer from putting this in 88 // the delay slot 89 return false; 90 } 91 92 // don't put moves involving oops into the delay slot since the VerifyOops code 93 // will make it much larger than a single instruction. 
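  // The checks below are deliberately conservative: any move that could expand
  // to more than one instruction (double-word moves, patched accesses,
  // compressed oop/klass encode or decode sequences, displacements that do not
  // fit in simm13) is rejected so it is never scheduled into a delay slot.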
94 if (VerifyOops) { 95 return false; 96 } 97 98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none || 99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) { 100 return false; 101 } 102 103 if (UseCompressedOops) { 104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false; 105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false; 106 } 107 108 if (UseCompressedClassPointers) { 109 if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS && 110 src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false; 111 } 112 113 if (dst->is_register()) { 114 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) { 115 return !PatchALot; 116 } else if (src->is_single_stack()) { 117 return true; 118 } 119 } 120 121 if (src->is_register()) { 122 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) { 123 return !PatchALot; 124 } else if (dst->is_single_stack()) { 125 return true; 126 } 127 } 128 129 if (dst->is_register() && 130 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) || 131 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) { 132 return true; 133 } 134 135 return false; 136 } 137 138 default: 139 return false; 140 } 141 ShouldNotReachHere(); 142 } 143 144 145 LIR_Opr LIR_Assembler::receiverOpr() { 146 return FrameMap::O0_oop_opr; 147 } 148 149 150 LIR_Opr LIR_Assembler::osrBufferPointer() { 151 return FrameMap::I0_opr; 152 } 153 154 155 int LIR_Assembler::initial_frame_size_in_bytes() const { 156 return in_bytes(frame_map()->framesize_in_bytes()); 157 } 158 159 160 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5); 161 // we fetch the class of the receiver (O0) and compare it with the cached class. 162 // If they do not match we jump to slow case. 163 int LIR_Assembler::check_icache() { 164 int offset = __ offset(); 165 __ inline_cache_check(O0, G5_inline_cache_reg); 166 return offset; 167 } 168 169 170 void LIR_Assembler::osr_entry() { 171 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp): 172 // 173 // 1. Create a new compiled activation. 174 // 2. Initialize local variables in the compiled activation. The expression stack must be empty 175 // at the osr_bci; it is not initialized. 176 // 3. Jump to the continuation address in compiled code to resume execution. 177 178 // OSR entry point 179 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 180 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 181 ValueStack* entry_state = osr_entry->end()->state(); 182 int number_of_locks = entry_state->locks_size(); 183 184 // Create a frame for the compiled activation. 185 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 186 187 // OSR buffer is 188 // 189 // locals[nlocals-1..0] 190 // monitors[number_of_locks-1..0] 191 // 192 // locals is a direct copy of the interpreter frame so in the osr buffer 193 // so first slot in the local array is the last local from the interpreter 194 // and last slot is local[0] (receiver) from the interpreter 195 // 196 // Similarly with locks. 
  // The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0
    //   and get string0.length() in str0
    // Get a pointer to the first character of string1 in tmp1
    //   and get string1.length() in str1
    // Also, get string0.length()-string1.length() in
    //   o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int value_offset = java_lang_String::value_offset_in_bytes(); // char array
    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset  = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str0, value_offset, tmp0);
      __ ld(str0, offset_offset, tmp2);
      __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(str0, count_offset, str0);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    } else {
      __ load_heap_oop(str0, value_offset, tmp1);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
    }

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset  = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str1, value_offset, tmp1);
      __ add(tmp0, tmp2, tmp0);

      __ ld(str1, offset_offset, tmp2);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(str1, count_offset, str1);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
      __ add(tmp1, tmp2, tmp1);
    } else {
      __ load_heap_oop(str1, value_offset, tmp2);
      __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
    }
    __ subcc(str0, str1, O7);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result);  // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit);  // limit = -min{string0.length(), string1.length()}

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow unlocking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
374 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 375 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry()); 376 } else { 377 // always do slow unlocking 378 // note: the slow unlocking code could be inlined here, however if we use 379 // slow unlocking, speed doesn't matter anyway and this solution is 380 // simpler and requires less duplicated code - additionally, the 381 // slow unlocking code is the same in either case which simplifies 382 // debugging 383 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry()); 384 __ delayed()->nop(); 385 } 386 // done 387 __ bind(*slow_case->continuation()); 388 } 389 390 391 int LIR_Assembler::emit_exception_handler() { 392 // if the last instruction is a call (typically to do a throw which 393 // is coming at the end after block reordering) the return address 394 // must still point into the code area in order to avoid assertion 395 // failures when searching for the corresponding bci => add a nop 396 // (was bug 5/14/1999 - gri) 397 __ nop(); 398 399 // generate code for exception handler 400 ciMethod* method = compilation()->method(); 401 402 address handler_base = __ start_a_stub(exception_handler_size); 403 404 if (handler_base == NULL) { 405 // not enough space left for the handler 406 bailout("exception handler overflow"); 407 return -1; 408 } 409 410 int offset = code_offset(); 411 412 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); 413 __ delayed()->nop(); 414 __ should_not_reach_here(); 415 guarantee(code_offset() - offset <= exception_handler_size, "overflow"); 416 __ end_a_stub(); 417 418 return offset; 419 } 420 421 422 // Emit the code to remove the frame from the stack in the exception 423 // unwind path. 
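// The handler below pulls the pending exception out of the thread-local state
// (JavaThread::exception_oop/exception_pc), clears that state, performs any
// unlocking and dtrace method-exit work that is still owed, and finally
// dispatches to the unwind_exception runtime stub with the exception oop in O0.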
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve thread in G2 across
                        // runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_int(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                    ShouldNotReachHere();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads always sign extend, but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}

void LIR_Assembler::emit_opShenandoahWriteBarrier(LIR_OpShenandoahWriteBarrier* op) {
  Unimplemented();
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch (code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ?
(BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort); 761 __ sll (rval, shift, rdst); 762 __ sra (rdst, shift, rdst); 763 break; 764 } 765 case Bytecodes::_i2c: { 766 Register rval = op->in_opr()->as_register(); 767 Register rdst = dst->as_register(); 768 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte; 769 __ sll (rval, shift, rdst); 770 __ srl (rdst, shift, rdst); 771 break; 772 } 773 774 default: ShouldNotReachHere(); 775 } 776 } 777 778 779 void LIR_Assembler::align_call(LIR_Code) { 780 // do nothing since all instructions are word aligned on sparc 781 } 782 783 784 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 785 __ call(op->addr(), rtype); 786 // The peephole pass fills the delay slot, add_call_info is done in 787 // LIR_Assembler::emit_delay. 788 } 789 790 791 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 792 __ ic_call(op->addr(), false); 793 // The peephole pass fills the delay slot, add_call_info is done in 794 // LIR_Assembler::emit_delay. 795 } 796 797 798 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 799 add_debug_info_for_null_check_here(op->info()); 800 __ load_klass(O0, G3_scratch); 801 if (Assembler::is_simm13(op->vtable_offset())) { 802 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method); 803 } else { 804 // This will generate 2 instructions 805 __ set(op->vtable_offset(), G5_method); 806 // ld_ptr, set_hi, set 807 __ ld_ptr(G3_scratch, G5_method, G5_method); 808 } 809 __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch); 810 __ callr(G3_scratch, G0); 811 // the peephole pass fills the delay slot 812 } 813 814 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) { 815 int store_offset; 816 if (!Assembler::is_simm13(offset + (type == T_LONG) ? 
wordSize : 0)) { 817 assert(!unaligned, "can't handle this"); 818 // for offsets larger than a simm13 we setup the offset in O7 819 __ set(offset, O7); 820 store_offset = store(from_reg, base, O7, type, wide); 821 } else { 822 if (type == T_ARRAY || type == T_OBJECT) { 823 __ verify_oop(from_reg->as_register()); 824 } 825 store_offset = code_offset(); 826 switch (type) { 827 case T_BOOLEAN: // fall through 828 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break; 829 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break; 830 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break; 831 case T_INT : __ stw(from_reg->as_register(), base, offset); break; 832 case T_LONG : 833 #ifdef _LP64 834 if (unaligned || PatchALot) { 835 __ srax(from_reg->as_register_lo(), 32, O7); 836 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); 837 __ stw(O7, base, offset + hi_word_offset_in_bytes); 838 } else { 839 __ stx(from_reg->as_register_lo(), base, offset); 840 } 841 #else 842 assert(Assembler::is_simm13(offset + 4), "must be"); 843 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); 844 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes); 845 #endif 846 break; 847 case T_ADDRESS: 848 case T_METADATA: 849 __ st_ptr(from_reg->as_register(), base, offset); 850 break; 851 case T_ARRAY : // fall through 852 case T_OBJECT: 853 { 854 if (UseCompressedOops && !wide) { 855 __ encode_heap_oop(from_reg->as_register(), G3_scratch); 856 store_offset = code_offset(); 857 __ stw(G3_scratch, base, offset); 858 } else { 859 __ st_ptr(from_reg->as_register(), base, offset); 860 } 861 break; 862 } 863 864 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break; 865 case T_DOUBLE: 866 { 867 FloatRegister reg = from_reg->as_double_reg(); 868 // split unaligned stores 869 if (unaligned || PatchALot) { 870 assert(Assembler::is_simm13(offset + 4), "must be"); 871 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4); 872 __ stf(FloatRegisterImpl::S, reg, base, offset); 873 } else { 874 __ stf(FloatRegisterImpl::D, reg, base, offset); 875 } 876 break; 877 } 878 default : ShouldNotReachHere(); 879 } 880 } 881 return store_offset; 882 } 883 884 885 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) { 886 if (type == T_ARRAY || type == T_OBJECT) { 887 __ verify_oop(from_reg->as_register()); 888 } 889 int store_offset = code_offset(); 890 switch (type) { 891 case T_BOOLEAN: // fall through 892 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break; 893 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break; 894 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break; 895 case T_INT : __ stw(from_reg->as_register(), base, disp); break; 896 case T_LONG : 897 #ifdef _LP64 898 __ stx(from_reg->as_register_lo(), base, disp); 899 #else 900 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match"); 901 __ std(from_reg->as_register_hi(), base, disp); 902 #endif 903 break; 904 case T_ADDRESS: 905 __ st_ptr(from_reg->as_register(), base, disp); 906 break; 907 case T_ARRAY : // fall through 908 case T_OBJECT: 909 { 910 if (UseCompressedOops && !wide) { 911 __ encode_heap_oop(from_reg->as_register(), G3_scratch); 912 store_offset = code_offset(); 913 __ stw(G3_scratch, base, disp); 914 } else { 915 __ st_ptr(from_reg->as_register(), base, disp); 916 } 917 break; 918 } 919 case 
T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break; 920 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break; 921 default : ShouldNotReachHere(); 922 } 923 return store_offset; 924 } 925 926 927 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) { 928 int load_offset; 929 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { 930 assert(base != O7, "destroying register"); 931 assert(!unaligned, "can't handle this"); 932 // for offsets larger than a simm13 we setup the offset in O7 933 __ set(offset, O7); 934 load_offset = load(base, O7, to_reg, type, wide); 935 } else { 936 load_offset = code_offset(); 937 switch(type) { 938 case T_BOOLEAN: // fall through 939 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break; 940 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break; 941 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break; 942 case T_INT : __ ld(base, offset, to_reg->as_register()); break; 943 case T_LONG : 944 if (!unaligned) { 945 #ifdef _LP64 946 __ ldx(base, offset, to_reg->as_register_lo()); 947 #else 948 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(), 949 "must be sequential"); 950 __ ldd(base, offset, to_reg->as_register_hi()); 951 #endif 952 } else { 953 #ifdef _LP64 954 assert(base != to_reg->as_register_lo(), "can't handle this"); 955 assert(O7 != to_reg->as_register_lo(), "can't handle this"); 956 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo()); 957 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last 958 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo()); 959 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo()); 960 #else 961 if (base == to_reg->as_register_lo()) { 962 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); 963 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); 964 } else { 965 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); 966 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); 967 } 968 #endif 969 } 970 break; 971 case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break; 972 case T_ADDRESS: 973 #ifdef _LP64 974 if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) { 975 __ lduw(base, offset, to_reg->as_register()); 976 __ decode_klass_not_null(to_reg->as_register()); 977 } else 978 #endif 979 { 980 __ ld_ptr(base, offset, to_reg->as_register()); 981 } 982 break; 983 case T_ARRAY : // fall through 984 case T_OBJECT: 985 { 986 if (UseCompressedOops && !wide) { 987 __ lduw(base, offset, to_reg->as_register()); 988 __ decode_heap_oop(to_reg->as_register()); 989 } else { 990 __ ld_ptr(base, offset, to_reg->as_register()); 991 } 992 break; 993 } 994 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break; 995 case T_DOUBLE: 996 { 997 FloatRegister reg = to_reg->as_double_reg(); 998 // split unaligned loads 999 if (unaligned || PatchALot) { 1000 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor()); 1001 __ ldf(FloatRegisterImpl::S, base, offset, reg); 1002 } else { 1003 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg()); 1004 } 1005 break; 1006 } 1007 default : ShouldNotReachHere(); 1008 } 1009 if (type == T_ARRAY || type == T_OBJECT) { 1010 __ verify_oop(to_reg->as_register()); 1011 
} 1012 } 1013 return load_offset; 1014 } 1015 1016 1017 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) { 1018 int load_offset = code_offset(); 1019 switch(type) { 1020 case T_BOOLEAN: // fall through 1021 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break; 1022 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break; 1023 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break; 1024 case T_INT : __ ld(base, disp, to_reg->as_register()); break; 1025 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break; 1026 case T_ARRAY : // fall through 1027 case T_OBJECT: 1028 { 1029 if (UseCompressedOops && !wide) { 1030 __ lduw(base, disp, to_reg->as_register()); 1031 __ decode_heap_oop(to_reg->as_register()); 1032 } else { 1033 __ ld_ptr(base, disp, to_reg->as_register()); 1034 } 1035 break; 1036 } 1037 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break; 1038 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break; 1039 case T_LONG : 1040 #ifdef _LP64 1041 __ ldx(base, disp, to_reg->as_register_lo()); 1042 #else 1043 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(), 1044 "must be sequential"); 1045 __ ldd(base, disp, to_reg->as_register_hi()); 1046 #endif 1047 break; 1048 default : ShouldNotReachHere(); 1049 } 1050 if (type == T_ARRAY || type == T_OBJECT) { 1051 __ verify_oop(to_reg->as_register()); 1052 } 1053 return load_offset; 1054 } 1055 1056 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 1057 LIR_Const* c = src->as_constant_ptr(); 1058 switch (c->type()) { 1059 case T_INT: 1060 case T_FLOAT: { 1061 Register src_reg = O7; 1062 int value = c->as_jint_bits(); 1063 if (value == 0) { 1064 src_reg = G0; 1065 } else { 1066 __ set(value, O7); 1067 } 1068 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1069 __ stw(src_reg, addr.base(), addr.disp()); 1070 break; 1071 } 1072 case T_ADDRESS: { 1073 Register src_reg = O7; 1074 int value = c->as_jint_bits(); 1075 if (value == 0) { 1076 src_reg = G0; 1077 } else { 1078 __ set(value, O7); 1079 } 1080 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1081 __ st_ptr(src_reg, addr.base(), addr.disp()); 1082 break; 1083 } 1084 case T_OBJECT: { 1085 Register src_reg = O7; 1086 jobject2reg(c->as_jobject(), src_reg); 1087 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1088 __ st_ptr(src_reg, addr.base(), addr.disp()); 1089 break; 1090 } 1091 case T_LONG: 1092 case T_DOUBLE: { 1093 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix()); 1094 1095 Register tmp = O7; 1096 int value_lo = c->as_jint_lo_bits(); 1097 if (value_lo == 0) { 1098 tmp = G0; 1099 } else { 1100 __ set(value_lo, O7); 1101 } 1102 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes); 1103 int value_hi = c->as_jint_hi_bits(); 1104 if (value_hi == 0) { 1105 tmp = G0; 1106 } else { 1107 __ set(value_hi, O7); 1108 } 1109 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes); 1110 break; 1111 } 1112 default: 1113 Unimplemented(); 1114 } 1115 } 1116 1117 1118 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 1119 LIR_Const* c = src->as_constant_ptr(); 1120 LIR_Address* addr = dest->as_address_ptr(); 1121 Register base = addr->base()->as_pointer_register(); 1122 int offset = -1; 1123 1124 switch (c->type()) { 1125 case T_INT: 1126 case T_FLOAT: 1127 
case T_ADDRESS: { 1128 LIR_Opr tmp = FrameMap::O7_opr; 1129 int value = c->as_jint_bits(); 1130 if (value == 0) { 1131 tmp = FrameMap::G0_opr; 1132 } else if (Assembler::is_simm13(value)) { 1133 __ set(value, O7); 1134 } 1135 if (addr->index()->is_valid()) { 1136 assert(addr->disp() == 0, "must be zero"); 1137 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 1138 } else { 1139 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 1140 offset = store(tmp, base, addr->disp(), type, wide, false); 1141 } 1142 break; 1143 } 1144 case T_LONG: 1145 case T_DOUBLE: { 1146 assert(!addr->index()->is_valid(), "can't handle reg reg address here"); 1147 assert(Assembler::is_simm13(addr->disp()) && 1148 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses"); 1149 1150 LIR_Opr tmp = FrameMap::O7_opr; 1151 int value_lo = c->as_jint_lo_bits(); 1152 if (value_lo == 0) { 1153 tmp = FrameMap::G0_opr; 1154 } else { 1155 __ set(value_lo, O7); 1156 } 1157 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false); 1158 int value_hi = c->as_jint_hi_bits(); 1159 if (value_hi == 0) { 1160 tmp = FrameMap::G0_opr; 1161 } else { 1162 __ set(value_hi, O7); 1163 } 1164 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false); 1165 break; 1166 } 1167 case T_OBJECT: { 1168 jobject obj = c->as_jobject(); 1169 LIR_Opr tmp; 1170 if (obj == NULL) { 1171 tmp = FrameMap::G0_opr; 1172 } else { 1173 tmp = FrameMap::O7_opr; 1174 jobject2reg(c->as_jobject(), O7); 1175 } 1176 // handle either reg+reg or reg+disp address 1177 if (addr->index()->is_valid()) { 1178 assert(addr->disp() == 0, "must be zero"); 1179 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 1180 } else { 1181 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 1182 offset = store(tmp, base, addr->disp(), type, wide, false); 1183 } 1184 1185 break; 1186 } 1187 default: 1188 Unimplemented(); 1189 } 1190 if (info != NULL) { 1191 assert(offset != -1, "offset should've been set"); 1192 add_debug_info_for_null_check(offset, info); 1193 } 1194 } 1195 1196 1197 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 1198 LIR_Const* c = src->as_constant_ptr(); 1199 LIR_Opr to_reg = dest; 1200 1201 switch (c->type()) { 1202 case T_INT: 1203 case T_ADDRESS: 1204 { 1205 jint con = c->as_jint(); 1206 if (to_reg->is_single_cpu()) { 1207 assert(patch_code == lir_patch_none, "no patching handled here"); 1208 __ set(con, to_reg->as_register()); 1209 } else { 1210 ShouldNotReachHere(); 1211 assert(to_reg->is_single_fpu(), "wrong register kind"); 1212 1213 __ set(con, O7); 1214 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS); 1215 __ st(O7, temp_slot); 1216 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg()); 1217 } 1218 } 1219 break; 1220 1221 case T_LONG: 1222 { 1223 jlong con = c->as_jlong(); 1224 1225 if (to_reg->is_double_cpu()) { 1226 #ifdef _LP64 1227 __ set(con, to_reg->as_register_lo()); 1228 #else 1229 __ set(low(con), to_reg->as_register_lo()); 1230 __ set(high(con), to_reg->as_register_hi()); 1231 #endif 1232 #ifdef _LP64 1233 } else if (to_reg->is_single_cpu()) { 1234 __ set(con, to_reg->as_register()); 1235 #endif 1236 } else { 1237 ShouldNotReachHere(); 1238 assert(to_reg->is_double_fpu(), "wrong register kind"); 1239 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS); 1240 Address 
temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); 1241 __ set(low(con), O7); 1242 __ st(O7, temp_slot_lo); 1243 __ set(high(con), O7); 1244 __ st(O7, temp_slot_hi); 1245 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg()); 1246 } 1247 } 1248 break; 1249 1250 case T_OBJECT: 1251 { 1252 if (patch_code == lir_patch_none) { 1253 jobject2reg(c->as_jobject(), to_reg->as_register()); 1254 } else { 1255 jobject2reg_with_patching(to_reg->as_register(), info); 1256 } 1257 } 1258 break; 1259 1260 case T_METADATA: 1261 { 1262 if (patch_code == lir_patch_none) { 1263 metadata2reg(c->as_metadata(), to_reg->as_register()); 1264 } else { 1265 klass2reg_with_patching(to_reg->as_register(), info); 1266 } 1267 } 1268 break; 1269 1270 case T_FLOAT: 1271 { 1272 address const_addr = __ float_constant(c->as_jfloat()); 1273 if (const_addr == NULL) { 1274 bailout("const section overflow"); 1275 break; 1276 } 1277 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); 1278 AddressLiteral const_addrlit(const_addr, rspec); 1279 if (to_reg->is_single_fpu()) { 1280 __ patchable_sethi(const_addrlit, O7); 1281 __ relocate(rspec); 1282 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg()); 1283 1284 } else { 1285 assert(to_reg->is_single_cpu(), "Must be a cpu register."); 1286 1287 __ set(const_addrlit, O7); 1288 __ ld(O7, 0, to_reg->as_register()); 1289 } 1290 } 1291 break; 1292 1293 case T_DOUBLE: 1294 { 1295 address const_addr = __ double_constant(c->as_jdouble()); 1296 if (const_addr == NULL) { 1297 bailout("const section overflow"); 1298 break; 1299 } 1300 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); 1301 1302 if (to_reg->is_double_fpu()) { 1303 AddressLiteral const_addrlit(const_addr, rspec); 1304 __ patchable_sethi(const_addrlit, O7); 1305 __ relocate(rspec); 1306 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); 1307 } else { 1308 assert(to_reg->is_double_cpu(), "Must be a long register."); 1309 #ifdef _LP64 1310 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); 1311 #else 1312 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo()); 1313 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi()); 1314 #endif 1315 } 1316 1317 } 1318 break; 1319 1320 default: 1321 ShouldNotReachHere(); 1322 } 1323 } 1324 1325 Address LIR_Assembler::as_Address(LIR_Address* addr) { 1326 Register reg = addr->base()->as_pointer_register(); 1327 LIR_Opr index = addr->index(); 1328 if (index->is_illegal()) { 1329 return Address(reg, addr->disp()); 1330 } else { 1331 assert (addr->disp() == 0, "unsupported address mode"); 1332 return Address(reg, index->as_pointer_register()); 1333 } 1334 } 1335 1336 1337 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 1338 switch (type) { 1339 case T_INT: 1340 case T_FLOAT: { 1341 Register tmp = O7; 1342 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 1343 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 1344 __ lduw(from.base(), from.disp(), tmp); 1345 __ stw(tmp, to.base(), to.disp()); 1346 break; 1347 } 1348 case T_OBJECT: { 1349 Register tmp = O7; 1350 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 1351 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 1352 __ ld_ptr(from.base(), from.disp(), tmp); 1353 __ st_ptr(tmp, to.base(), to.disp()); 1354 break; 1355 } 1356 case T_LONG: 1357 case T_DOUBLE: { 1358 
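      // A long/double stack slot is copied as two 32-bit words with lduw/stw;
      // the bit pattern is transferred verbatim, so the same sequence serves
      // both T_LONG and T_DOUBLE.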
Register tmp = O7; 1359 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); 1360 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); 1361 __ lduw(from.base(), from.disp(), tmp); 1362 __ stw(tmp, to.base(), to.disp()); 1363 __ lduw(from.base(), from.disp() + 4, tmp); 1364 __ stw(tmp, to.base(), to.disp() + 4); 1365 break; 1366 } 1367 1368 default: 1369 ShouldNotReachHere(); 1370 } 1371 } 1372 1373 1374 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 1375 Address base = as_Address(addr); 1376 return Address(base.base(), base.disp() + hi_word_offset_in_bytes); 1377 } 1378 1379 1380 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 1381 Address base = as_Address(addr); 1382 return Address(base.base(), base.disp() + lo_word_offset_in_bytes); 1383 } 1384 1385 1386 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, 1387 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) { 1388 1389 assert(type != T_METADATA, "load of metadata ptr not supported"); 1390 LIR_Address* addr = src_opr->as_address_ptr(); 1391 LIR_Opr to_reg = dest; 1392 1393 Register src = addr->base()->as_pointer_register(); 1394 Register disp_reg = noreg; 1395 int disp_value = addr->disp(); 1396 bool needs_patching = (patch_code != lir_patch_none); 1397 1398 if (addr->base()->type() == T_OBJECT) { 1399 __ verify_oop(src); 1400 } 1401 1402 PatchingStub* patch = NULL; 1403 if (needs_patching) { 1404 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1405 assert(!to_reg->is_double_cpu() || 1406 patch_code == lir_patch_none || 1407 patch_code == lir_patch_normal, "patching doesn't match register"); 1408 } 1409 1410 if (addr->index()->is_illegal()) { 1411 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { 1412 if (needs_patching) { 1413 __ patchable_set(0, O7); 1414 } else { 1415 __ set(disp_value, O7); 1416 } 1417 disp_reg = O7; 1418 } 1419 } else if (unaligned || PatchALot) { 1420 __ add(src, addr->index()->as_register(), O7); 1421 src = O7; 1422 } else { 1423 disp_reg = addr->index()->as_pointer_register(); 1424 assert(disp_value == 0, "can't handle 3 operand addresses"); 1425 } 1426 1427 // remember the offset of the load. The patching_epilog must be done 1428 // before the call to add_debug_info, otherwise the PcDescs don't get 1429 // entered in increasing order. 
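  // Note: the offset returned by load()/store() below is the code offset of the
  // memory access itself (not of any preceding set/encode), so the implicit
  // null-check debug info recorded afterwards points at the instruction that
  // can actually fault.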
1430 int offset = code_offset(); 1431 1432 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 1433 if (disp_reg == noreg) { 1434 offset = load(src, disp_value, to_reg, type, wide, unaligned); 1435 } else { 1436 assert(!unaligned, "can't handle this"); 1437 offset = load(src, disp_reg, to_reg, type, wide); 1438 } 1439 1440 if (patch != NULL) { 1441 patching_epilog(patch, patch_code, src, info); 1442 } 1443 if (info != NULL) add_debug_info_for_null_check(offset, info); 1444 } 1445 1446 1447 void LIR_Assembler::prefetchr(LIR_Opr src) { 1448 LIR_Address* addr = src->as_address_ptr(); 1449 Address from_addr = as_Address(addr); 1450 1451 if (VM_Version::has_v9()) { 1452 __ prefetch(from_addr, Assembler::severalReads); 1453 } 1454 } 1455 1456 1457 void LIR_Assembler::prefetchw(LIR_Opr src) { 1458 LIR_Address* addr = src->as_address_ptr(); 1459 Address from_addr = as_Address(addr); 1460 1461 if (VM_Version::has_v9()) { 1462 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads); 1463 } 1464 } 1465 1466 1467 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 1468 Address addr; 1469 if (src->is_single_word()) { 1470 addr = frame_map()->address_for_slot(src->single_stack_ix()); 1471 } else if (src->is_double_word()) { 1472 addr = frame_map()->address_for_double_slot(src->double_stack_ix()); 1473 } 1474 1475 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 1476 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned); 1477 } 1478 1479 1480 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 1481 Address addr; 1482 if (dest->is_single_word()) { 1483 addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1484 } else if (dest->is_double_word()) { 1485 addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1486 } 1487 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 1488 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned); 1489 } 1490 1491 1492 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { 1493 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { 1494 if (from_reg->is_double_fpu()) { 1495 // double to double moves 1496 assert(to_reg->is_double_fpu(), "should match"); 1497 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg()); 1498 } else { 1499 // float to float moves 1500 assert(to_reg->is_single_fpu(), "should match"); 1501 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg()); 1502 } 1503 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { 1504 if (from_reg->is_double_cpu()) { 1505 #ifdef _LP64 1506 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); 1507 #else 1508 assert(to_reg->is_double_cpu() && 1509 from_reg->as_register_hi() != to_reg->as_register_lo() && 1510 from_reg->as_register_lo() != to_reg->as_register_hi(), 1511 "should both be long and not overlap"); 1512 // long to long moves 1513 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi()); 1514 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo()); 1515 #endif 1516 #ifdef _LP64 1517 } else if (to_reg->is_double_cpu()) { 1518 // int to int moves 1519 __ mov(from_reg->as_register(), to_reg->as_register_lo()); 1520 #endif 1521 } else { 1522 // int to int moves 1523 __ mov(from_reg->as_register(), to_reg->as_register()); 1524 } 1525 } else { 1526 ShouldNotReachHere(); 1527 } 1528 if (to_reg->type() == T_OBJECT || 
to_reg->type() == T_ARRAY) { 1529 __ verify_oop(to_reg->as_register()); 1530 } 1531 } 1532 1533 1534 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type, 1535 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 1536 bool wide, bool unaligned) { 1537 assert(type != T_METADATA, "store of metadata ptr not supported"); 1538 LIR_Address* addr = dest->as_address_ptr(); 1539 1540 Register src = addr->base()->as_pointer_register(); 1541 Register disp_reg = noreg; 1542 int disp_value = addr->disp(); 1543 bool needs_patching = (patch_code != lir_patch_none); 1544 1545 if (addr->base()->is_oop_register()) { 1546 __ verify_oop(src); 1547 } 1548 1549 PatchingStub* patch = NULL; 1550 if (needs_patching) { 1551 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1552 assert(!from_reg->is_double_cpu() || 1553 patch_code == lir_patch_none || 1554 patch_code == lir_patch_normal, "patching doesn't match register"); 1555 } 1556 1557 if (addr->index()->is_illegal()) { 1558 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { 1559 if (needs_patching) { 1560 __ patchable_set(0, O7); 1561 } else { 1562 __ set(disp_value, O7); 1563 } 1564 disp_reg = O7; 1565 } 1566 } else if (unaligned || PatchALot) { 1567 __ add(src, addr->index()->as_register(), O7); 1568 src = O7; 1569 } else { 1570 disp_reg = addr->index()->as_pointer_register(); 1571 assert(disp_value == 0, "can't handle 3 operand addresses"); 1572 } 1573 1574 // remember the offset of the store. The patching_epilog must be done 1575 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1576 // entered in increasing order. 1577 int offset; 1578 1579 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 1580 if (disp_reg == noreg) { 1581 offset = store(from_reg, src, disp_value, type, wide, unaligned); 1582 } else { 1583 assert(!unaligned, "can't handle this"); 1584 offset = store(from_reg, src, disp_reg, type, wide); 1585 } 1586 1587 if (patch != NULL) { 1588 patching_epilog(patch, patch_code, src, info); 1589 } 1590 1591 if (info != NULL) add_debug_info_for_null_check(offset, info); 1592 } 1593 1594 1595 void LIR_Assembler::return_op(LIR_Opr result) { 1596 // the poll may need a register so just pick one that isn't the return register 1597 #if defined(TIERED) && !defined(_LP64) 1598 if (result->type_field() == LIR_OprDesc::long_type) { 1599 // Must move the result to G1 1600 // Must leave proper result in O0,O1 and G1 (TIERED only) 1601 __ sllx(I0, 32, G1); // Shift bits into high G1 1602 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 
1603 __ or3 (I1, G1, G1); // OR 64 bits into G1 1604 #ifdef ASSERT 1605 // mangle it so any problems will show up 1606 __ set(0xdeadbeef, I0); 1607 __ set(0xdeadbeef, I1); 1608 #endif 1609 } 1610 #endif // TIERED 1611 __ set((intptr_t)os::get_polling_page(), L0); 1612 __ relocate(relocInfo::poll_return_type); 1613 __ ld_ptr(L0, 0, G0); 1614 __ ret(); 1615 __ delayed()->restore(); 1616 } 1617 1618 1619 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 1620 __ set((intptr_t)os::get_polling_page(), tmp->as_register()); 1621 if (info != NULL) { 1622 add_debug_info_for_branch(info); 1623 } else { 1624 __ relocate(relocInfo::poll_type); 1625 } 1626 1627 int offset = __ offset(); 1628 __ ld_ptr(tmp->as_register(), 0, G0); 1629 1630 return offset; 1631 } 1632 1633 1634 void LIR_Assembler::emit_static_call_stub() { 1635 address call_pc = __ pc(); 1636 address stub = __ start_a_stub(call_stub_size); 1637 if (stub == NULL) { 1638 bailout("static call stub overflow"); 1639 return; 1640 } 1641 1642 int start = __ offset(); 1643 __ relocate(static_stub_Relocation::spec(call_pc)); 1644 1645 __ set_metadata(NULL, G5); 1646 // must be set to -1 at code generation time 1647 AddressLiteral addrlit(-1); 1648 __ jump_to(addrlit, G3); 1649 __ delayed()->nop(); 1650 1651 assert(__ offset() - start <= call_stub_size, "stub too big"); 1652 __ end_a_stub(); 1653 } 1654 1655 1656 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1657 if (opr1->is_single_fpu()) { 1658 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg()); 1659 } else if (opr1->is_double_fpu()) { 1660 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg()); 1661 } else if (opr1->is_single_cpu()) { 1662 if (opr2->is_constant()) { 1663 switch (opr2->as_constant_ptr()->type()) { 1664 case T_INT: 1665 { jint con = opr2->as_constant_ptr()->as_jint(); 1666 if (Assembler::is_simm13(con)) { 1667 __ cmp(opr1->as_register(), con); 1668 } else { 1669 __ set(con, O7); 1670 __ cmp(opr1->as_register(), O7); 1671 } 1672 } 1673 break; 1674 1675 case T_OBJECT: 1676 // there are only equal/notequal comparisions on objects 1677 { jobject con = opr2->as_constant_ptr()->as_jobject(); 1678 if (con == NULL) { 1679 __ cmp(opr1->as_register(), 0); 1680 } else { 1681 jobject2reg(con, O7); 1682 __ cmp(opr1->as_register(), O7); 1683 } 1684 } 1685 break; 1686 1687 default: 1688 ShouldNotReachHere(); 1689 break; 1690 } 1691 } else { 1692 if (opr2->is_address()) { 1693 LIR_Address * addr = opr2->as_address_ptr(); 1694 BasicType type = addr->type(); 1695 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); 1696 else __ ld(as_Address(addr), O7); 1697 __ cmp(opr1->as_register(), O7); 1698 } else { 1699 __ cmp(opr1->as_register(), opr2->as_register()); 1700 } 1701 } 1702 } else if (opr1->is_double_cpu()) { 1703 Register xlo = opr1->as_register_lo(); 1704 Register xhi = opr1->as_register_hi(); 1705 if (opr2->is_constant() && opr2->as_jlong() == 0) { 1706 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); 1707 #ifdef _LP64 1708 __ orcc(xhi, G0, G0); 1709 #else 1710 __ orcc(xhi, xlo, G0); 1711 #endif 1712 } else if (opr2->is_register()) { 1713 Register ylo = opr2->as_register_lo(); 1714 Register yhi = opr2->as_register_hi(); 1715 #ifdef _LP64 1716 __ cmp(xlo, ylo); 1717 #else 1718 __ subcc(xlo, ylo, xlo); 1719 __ subccc(xhi, yhi, xhi); 1720 if (condition == lir_cond_equal || condition == 
lir_cond_notEqual) { 1721 __ orcc(xhi, xlo, G0); 1722 } 1723 #endif 1724 } else { 1725 ShouldNotReachHere(); 1726 } 1727 } else if (opr1->is_address()) { 1728 LIR_Address * addr = opr1->as_address_ptr(); 1729 BasicType type = addr->type(); 1730 assert (opr2->is_constant(), "Checking"); 1731 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); 1732 else __ ld(as_Address(addr), O7); 1733 __ cmp(O7, opr2->as_constant_ptr()->as_jint()); 1734 } else { 1735 ShouldNotReachHere(); 1736 } 1737 } 1738 1739 1740 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ 1741 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1742 bool is_unordered_less = (code == lir_ucmp_fd2i); 1743 if (left->is_single_fpu()) { 1744 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); 1745 } else if (left->is_double_fpu()) { 1746 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); 1747 } else { 1748 ShouldNotReachHere(); 1749 } 1750 } else if (code == lir_cmp_l2i) { 1751 #ifdef _LP64 1752 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register()); 1753 #else 1754 __ lcmp(left->as_register_hi(), left->as_register_lo(), 1755 right->as_register_hi(), right->as_register_lo(), 1756 dst->as_register()); 1757 #endif 1758 } else { 1759 ShouldNotReachHere(); 1760 } 1761 } 1762 1763 1764 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 1765 Assembler::Condition acond; 1766 switch (condition) { 1767 case lir_cond_equal: acond = Assembler::equal; break; 1768 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1769 case lir_cond_less: acond = Assembler::less; break; 1770 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 1771 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; 1772 case lir_cond_greater: acond = Assembler::greater; break; 1773 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; 1774 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; 1775 default: ShouldNotReachHere(); 1776 }; 1777 1778 if (opr1->is_constant() && opr1->type() == T_INT) { 1779 Register dest = result->as_register(); 1780 // load up first part of constant before branch 1781 // and do the rest in the delay slot. 
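// A worked example of that split, as I read the SPARC encoding (not something
// this file spells out): a constant outside the 13-bit immediate range is built
// in two steps,
//   sethi %hi(0x12345678), dest   ! dest = 0x12345400 (bits 31..10)
//   or    dest, 0x278,      dest  ! dest = 0x12345678 (low 10 bits, delay slot)
// while a constant that fits in simm13 is materialized by a single or3 in the
// delay slot.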
1782 if (!Assembler::is_simm13(opr1->as_jint())) { 1783 __ sethi(opr1->as_jint(), dest); 1784 } 1785 } else if (opr1->is_constant()) { 1786 const2reg(opr1, result, lir_patch_none, NULL); 1787 } else if (opr1->is_register()) { 1788 reg2reg(opr1, result); 1789 } else if (opr1->is_stack()) { 1790 stack2reg(opr1, result, result->type()); 1791 } else { 1792 ShouldNotReachHere(); 1793 } 1794 Label skip; 1795 #ifdef _LP64 1796 if (type == T_INT) { 1797 __ br(acond, false, Assembler::pt, skip); 1798 } else 1799 #endif 1800 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit 1801 if (opr1->is_constant() && opr1->type() == T_INT) { 1802 Register dest = result->as_register(); 1803 if (Assembler::is_simm13(opr1->as_jint())) { 1804 __ delayed()->or3(G0, opr1->as_jint(), dest); 1805 } else { 1806 // the sethi has been done above, so just put in the low 10 bits 1807 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest); 1808 } 1809 } else { 1810 // can't do anything useful in the delay slot 1811 __ delayed()->nop(); 1812 } 1813 if (opr2->is_constant()) { 1814 const2reg(opr2, result, lir_patch_none, NULL); 1815 } else if (opr2->is_register()) { 1816 reg2reg(opr2, result); 1817 } else if (opr2->is_stack()) { 1818 stack2reg(opr2, result, result->type()); 1819 } else { 1820 ShouldNotReachHere(); 1821 } 1822 __ bind(skip); 1823 } 1824 1825 1826 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 1827 assert(info == NULL, "unused on this code path"); 1828 assert(left->is_register(), "wrong items state"); 1829 assert(dest->is_register(), "wrong items state"); 1830 1831 if (right->is_register()) { 1832 if (dest->is_float_kind()) { 1833 1834 FloatRegister lreg, rreg, res; 1835 FloatRegisterImpl::Width w; 1836 if (right->is_single_fpu()) { 1837 w = FloatRegisterImpl::S; 1838 lreg = left->as_float_reg(); 1839 rreg = right->as_float_reg(); 1840 res = dest->as_float_reg(); 1841 } else { 1842 w = FloatRegisterImpl::D; 1843 lreg = left->as_double_reg(); 1844 rreg = right->as_double_reg(); 1845 res = dest->as_double_reg(); 1846 } 1847 1848 switch (code) { 1849 case lir_add: __ fadd(w, lreg, rreg, res); break; 1850 case lir_sub: __ fsub(w, lreg, rreg, res); break; 1851 case lir_mul: // fall through 1852 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; 1853 case lir_div: // fall through 1854 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; 1855 default: ShouldNotReachHere(); 1856 } 1857 1858 } else if (dest->is_double_cpu()) { 1859 #ifdef _LP64 1860 Register dst_lo = dest->as_register_lo(); 1861 Register op1_lo = left->as_pointer_register(); 1862 Register op2_lo = right->as_pointer_register(); 1863 1864 switch (code) { 1865 case lir_add: 1866 __ add(op1_lo, op2_lo, dst_lo); 1867 break; 1868 1869 case lir_sub: 1870 __ sub(op1_lo, op2_lo, dst_lo); 1871 break; 1872 1873 default: ShouldNotReachHere(); 1874 } 1875 #else 1876 Register op1_lo = left->as_register_lo(); 1877 Register op1_hi = left->as_register_hi(); 1878 Register op2_lo = right->as_register_lo(); 1879 Register op2_hi = right->as_register_hi(); 1880 Register dst_lo = dest->as_register_lo(); 1881 Register dst_hi = dest->as_register_hi(); 1882 1883 switch (code) { 1884 case lir_add: 1885 __ addcc(op1_lo, op2_lo, dst_lo); 1886 __ addc (op1_hi, op2_hi, dst_hi); 1887 break; 1888 1889 case lir_sub: 1890 __ subcc(op1_lo, op2_lo, dst_lo); 1891 __ subc (op1_hi, op2_hi, dst_hi); 1892 break; 1893 1894 default: ShouldNotReachHere(); 1895 } 1896 
#endif 1897 } else { 1898 assert (right->is_single_cpu(), "Just Checking"); 1899 1900 Register lreg = left->as_register(); 1901 Register res = dest->as_register(); 1902 Register rreg = right->as_register(); 1903 switch (code) { 1904 case lir_add: __ add (lreg, rreg, res); break; 1905 case lir_sub: __ sub (lreg, rreg, res); break; 1906 case lir_mul: __ mulx (lreg, rreg, res); break; 1907 default: ShouldNotReachHere(); 1908 } 1909 } 1910 } else { 1911 assert (right->is_constant(), "must be constant"); 1912 1913 if (dest->is_single_cpu()) { 1914 Register lreg = left->as_register(); 1915 Register res = dest->as_register(); 1916 int simm13 = right->as_constant_ptr()->as_jint(); 1917 1918 switch (code) { 1919 case lir_add: __ add (lreg, simm13, res); break; 1920 case lir_sub: __ sub (lreg, simm13, res); break; 1921 case lir_mul: __ mulx (lreg, simm13, res); break; 1922 default: ShouldNotReachHere(); 1923 } 1924 } else { 1925 Register lreg = left->as_pointer_register(); 1926 Register res = dest->as_register_lo(); 1927 long con = right->as_constant_ptr()->as_jlong(); 1928 assert(Assembler::is_simm13(con), "must be simm13"); 1929 1930 switch (code) { 1931 case lir_add: __ add (lreg, (int)con, res); break; 1932 case lir_sub: __ sub (lreg, (int)con, res); break; 1933 case lir_mul: __ mulx (lreg, (int)con, res); break; 1934 default: ShouldNotReachHere(); 1935 } 1936 } 1937 } 1938 } 1939 1940 1941 void LIR_Assembler::fpop() { 1942 // do nothing 1943 } 1944 1945 1946 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1947 switch (code) { 1948 case lir_sin: 1949 case lir_tan: 1950 case lir_cos: { 1951 assert(thread->is_valid(), "preserve the thread object for performance reasons"); 1952 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); 1953 break; 1954 } 1955 case lir_sqrt: { 1956 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1957 FloatRegister src_reg = value->as_double_reg(); 1958 FloatRegister dst_reg = dest->as_double_reg(); 1959 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); 1960 break; 1961 } 1962 case lir_abs: { 1963 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1964 FloatRegister src_reg = value->as_double_reg(); 1965 FloatRegister dst_reg = dest->as_double_reg(); 1966 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); 1967 break; 1968 } 1969 default: { 1970 ShouldNotReachHere(); 1971 break; 1972 } 1973 } 1974 } 1975 1976 1977 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1978 if (right->is_constant()) { 1979 if (dest->is_single_cpu()) { 1980 int simm13 = right->as_constant_ptr()->as_jint(); 1981 switch (code) { 1982 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; 1983 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; 1984 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; 1985 default: ShouldNotReachHere(); 1986 } 1987 } else { 1988 long c = right->as_constant_ptr()->as_jlong(); 1989 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); 1990 int simm13 = (int)c; 1991 switch (code) { 1992 case lir_logic_and: 1993 #ifndef _LP64 1994 __ and3 (left->as_register_hi(), 0, dest->as_register_hi()); 1995 #endif 1996 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); 1997 break; 1998 1999 case lir_logic_or: 2000 #ifndef _LP64 2001 __ or3 (left->as_register_hi(), 0, dest->as_register_hi()); 2002 
#endif 2003 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); 2004 break; 2005 2006 case lir_logic_xor: 2007 #ifndef _LP64 2008 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi()); 2009 #endif 2010 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); 2011 break; 2012 2013 default: ShouldNotReachHere(); 2014 } 2015 } 2016 } else { 2017 assert(right->is_register(), "right should be in register"); 2018 2019 if (dest->is_single_cpu()) { 2020 switch (code) { 2021 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; 2022 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; 2023 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; 2024 default: ShouldNotReachHere(); 2025 } 2026 } else { 2027 #ifdef _LP64 2028 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 2029 left->as_register_lo(); 2030 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : 2031 right->as_register_lo(); 2032 2033 switch (code) { 2034 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; 2035 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; 2036 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; 2037 default: ShouldNotReachHere(); 2038 } 2039 #else 2040 switch (code) { 2041 case lir_logic_and: 2042 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2043 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2044 break; 2045 2046 case lir_logic_or: 2047 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2048 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2049 break; 2050 2051 case lir_logic_xor: 2052 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); 2053 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); 2054 break; 2055 2056 default: ShouldNotReachHere(); 2057 } 2058 #endif 2059 } 2060 } 2061 } 2062 2063 2064 int LIR_Assembler::shift_amount(BasicType t) { 2065 int elem_size = type2aelembytes(t); 2066 switch (elem_size) { 2067 case 1 : return 0; 2068 case 2 : return 1; 2069 case 4 : return 2; 2070 case 8 : return 3; 2071 } 2072 ShouldNotReachHere(); 2073 return -1; 2074 } 2075 2076 2077 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2078 assert(exceptionOop->as_register() == Oexception, "should match"); 2079 assert(exceptionPC->as_register() == Oissuing_pc, "should match"); 2080 2081 info->add_register_oop(exceptionOop); 2082 2083 // reuse the debug info from the safepoint poll for the throw op itself 2084 address pc_for_athrow = __ pc(); 2085 int pc_for_athrow_offset = __ offset(); 2086 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 2087 __ set(pc_for_athrow, Oissuing_pc, rspec); 2088 add_call_info(pc_for_athrow_offset, info); // for exception handler 2089 2090 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); 2091 __ delayed()->nop(); 2092 } 2093 2094 2095 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2096 assert(exceptionOop->as_register() == Oexception, "should match"); 2097 2098 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry); 2099 __ delayed()->nop(); 2100 } 2101 2102 void 
LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2103 Register src = op->src()->as_register(); 2104 Register dst = op->dst()->as_register(); 2105 Register src_pos = op->src_pos()->as_register(); 2106 Register dst_pos = op->dst_pos()->as_register(); 2107 Register length = op->length()->as_register(); 2108 Register tmp = op->tmp()->as_register(); 2109 Register tmp2 = O7; 2110 2111 int flags = op->flags(); 2112 ciArrayKlass* default_type = op->expected_type(); 2113 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 2114 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 2115 2116 #ifdef _LP64 2117 // higher 32bits must be null 2118 __ sra(dst_pos, 0, dst_pos); 2119 __ sra(src_pos, 0, src_pos); 2120 __ sra(length, 0, length); 2121 #endif 2122 2123 // set up the arraycopy stub information 2124 ArrayCopyStub* stub = op->stub(); 2125 2126 // always do stub if no type information is available. it's ok if 2127 // the known type isn't loaded since the code sanity checks 2128 // in debug mode and the type isn't required when we know the exact type 2129 // also check that the type is an array type. 2130 if (op->expected_type() == NULL) { 2131 __ mov(src, O0); 2132 __ mov(src_pos, O1); 2133 __ mov(dst, O2); 2134 __ mov(dst_pos, O3); 2135 __ mov(length, O4); 2136 address copyfunc_addr = StubRoutines::generic_arraycopy(); 2137 2138 if (copyfunc_addr == NULL) { // Use C version if stub was not generated 2139 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); 2140 } else { 2141 #ifndef PRODUCT 2142 if (PrintC1Statistics) { 2143 address counter = (address)&Runtime1::_generic_arraycopystub_cnt; 2144 __ inc_counter(counter, G1, G3); 2145 } 2146 #endif 2147 __ call_VM_leaf(tmp, copyfunc_addr); 2148 } 2149 2150 if (copyfunc_addr != NULL) { 2151 __ xor3(O0, -1, tmp); 2152 __ sub(length, tmp, length); 2153 __ add(src_pos, tmp, src_pos); 2154 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); 2155 __ delayed()->add(dst_pos, tmp, dst_pos); 2156 } else { 2157 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); 2158 __ delayed()->nop(); 2159 } 2160 __ bind(*stub->continuation()); 2161 return; 2162 } 2163 2164 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point"); 2165 2166 // make sure src and dst are non-null and load array length 2167 if (flags & LIR_OpArrayCopy::src_null_check) { 2168 __ tst(src); 2169 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 2170 __ delayed()->nop(); 2171 } 2172 2173 if (flags & LIR_OpArrayCopy::dst_null_check) { 2174 __ tst(dst); 2175 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 2176 __ delayed()->nop(); 2177 } 2178 2179 // If the compiler was not able to prove that exact type of the source or the destination 2180 // of the arraycopy is an array type, check at runtime if the source or the destination is 2181 // an instance type. 
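// (Note on the layout_helper tests below, based on my understanding of the Klass
//  encoding rather than anything stated here: instance klasses keep a non-negative
//  layout_helper while array klasses encode theirs as a negative value, so a value
//  that is greaterEqual to Klass::_lh_neutral_value identifies a non-array and we
//  divert to the stub.)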
2182 if (flags & LIR_OpArrayCopy::type_check) { 2183 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) { 2184 __ load_klass(dst, tmp); 2185 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2); 2186 __ cmp(tmp2, Klass::_lh_neutral_value); 2187 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry()); 2188 __ delayed()->nop(); 2189 } 2190 2191 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) { 2192 __ load_klass(src, tmp); 2193 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2); 2194 __ cmp(tmp2, Klass::_lh_neutral_value); 2195 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry()); 2196 __ delayed()->nop(); 2197 } 2198 } 2199 2200 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2201 // test src_pos register 2202 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry()); 2203 __ delayed()->nop(); 2204 } 2205 2206 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2207 // test dst_pos register 2208 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry()); 2209 __ delayed()->nop(); 2210 } 2211 2212 if (flags & LIR_OpArrayCopy::length_positive_check) { 2213 // make sure length isn't negative 2214 __ cmp_zero_and_br(Assembler::less, length, *stub->entry()); 2215 __ delayed()->nop(); 2216 } 2217 2218 if (flags & LIR_OpArrayCopy::src_range_check) { 2219 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2); 2220 __ add(length, src_pos, tmp); 2221 __ cmp(tmp2, tmp); 2222 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); 2223 __ delayed()->nop(); 2224 } 2225 2226 if (flags & LIR_OpArrayCopy::dst_range_check) { 2227 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2); 2228 __ add(length, dst_pos, tmp); 2229 __ cmp(tmp2, tmp); 2230 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); 2231 __ delayed()->nop(); 2232 } 2233 2234 int shift = shift_amount(basic_type); 2235 2236 if (flags & LIR_OpArrayCopy::type_check) { 2237 // We don't know the array types are compatible 2238 if (basic_type != T_OBJECT) { 2239 // Simple test for basic type arrays 2240 if (UseCompressedClassPointers) { 2241 // We don't need decode because we just need to compare 2242 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); 2243 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2244 __ cmp(tmp, tmp2); 2245 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2246 } else { 2247 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); 2248 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 2249 __ cmp(tmp, tmp2); 2250 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2251 } 2252 __ delayed()->nop(); 2253 } else { 2254 // For object arrays, if src is a sub class of dst then we can 2255 // safely do the copy. 2256 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2257 2258 Label cont, slow; 2259 assert_different_registers(tmp, tmp2, G3, G1); 2260 2261 __ load_klass(src, G3); 2262 __ load_klass(dst, G1); 2263 2264 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL); 2265 2266 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2267 __ delayed()->nop(); 2268 2269 __ cmp(G3, 0); 2270 if (copyfunc_addr != NULL) { // use stub if available 2271 // src is not a sub class of dst so we have to do a 2272 // per-element check. 
2273 __ br(Assembler::notEqual, false, Assembler::pt, cont); 2274 __ delayed()->nop(); 2275 2276 __ bind(slow); 2277 2278 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2279 if ((flags & mask) != mask) { 2280 // Check that at least both of them object arrays. 2281 assert(flags & mask, "one of the two should be known to be an object array"); 2282 2283 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2284 __ load_klass(src, tmp); 2285 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2286 __ load_klass(dst, tmp); 2287 } 2288 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2289 2290 __ lduw(tmp, lh_offset, tmp2); 2291 2292 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2293 __ set(objArray_lh, tmp); 2294 __ cmp(tmp, tmp2); 2295 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 2296 __ delayed()->nop(); 2297 } 2298 2299 Register src_ptr = O0; 2300 Register dst_ptr = O1; 2301 Register len = O2; 2302 Register chk_off = O3; 2303 Register super_k = O4; 2304 2305 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); 2306 if (shift == 0) { 2307 __ add(src_ptr, src_pos, src_ptr); 2308 } else { 2309 __ sll(src_pos, shift, tmp); 2310 __ add(src_ptr, tmp, src_ptr); 2311 } 2312 2313 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); 2314 if (shift == 0) { 2315 __ add(dst_ptr, dst_pos, dst_ptr); 2316 } else { 2317 __ sll(dst_pos, shift, tmp); 2318 __ add(dst_ptr, tmp, dst_ptr); 2319 } 2320 __ mov(length, len); 2321 __ load_klass(dst, tmp); 2322 2323 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2324 __ ld_ptr(tmp, ek_offset, super_k); 2325 2326 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2327 __ lduw(super_k, sco_offset, chk_off); 2328 2329 __ call_VM_leaf(tmp, copyfunc_addr); 2330 2331 #ifndef PRODUCT 2332 if (PrintC1Statistics) { 2333 Label failed; 2334 __ br_notnull_short(O0, Assembler::pn, failed); 2335 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3); 2336 __ bind(failed); 2337 } 2338 #endif 2339 2340 __ br_null(O0, false, Assembler::pt, *stub->continuation()); 2341 __ delayed()->xor3(O0, -1, tmp); 2342 2343 #ifndef PRODUCT 2344 if (PrintC1Statistics) { 2345 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3); 2346 } 2347 #endif 2348 2349 __ sub(length, tmp, length); 2350 __ add(src_pos, tmp, src_pos); 2351 __ br(Assembler::always, false, Assembler::pt, *stub->entry()); 2352 __ delayed()->add(dst_pos, tmp, dst_pos); 2353 2354 __ bind(cont); 2355 } else { 2356 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); 2357 __ delayed()->nop(); 2358 __ bind(cont); 2359 } 2360 } 2361 } 2362 2363 #ifdef ASSERT 2364 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2365 // Sanity check the known type with the incoming class. For the 2366 // primitive case the types must match exactly with src.klass and 2367 // dst.klass each exactly matching the default type. For the 2368 // object array case, if no type check is needed then either the 2369 // dst type is exactly the expected type and the src type is a 2370 // subtype which we can't check or src is the same array as dst 2371 // but not necessarily exactly of type default_type. 2372 Label known_ok, halt; 2373 metadata2reg(op->expected_type()->constant_encoding(), tmp); 2374 if (UseCompressedClassPointers) { 2375 // tmp holds the default type. It currently comes uncompressed after the 2376 // load of a constant, so encode it. 
      __ encode_klass_not_null(tmp);
      // load the raw value of the dst klass; we compare the narrow (compressed)
      // values directly, without decoding them.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // arraycopy stubs take a length in number of elements, so don't scale it.
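  // (For context, an assumption about StubRoutines rather than something spelled
  //  out here: the entry chosen above by select_arraycopy_function is specialized
  //  on element size, on alignment and on whether the ranges are known to be
  //  disjoint, e.g. a disjoint jint copy versus a conjoint jbyte copy; 'name'
  //  reports which stub was selected.)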
2448 __ mov(length, len); 2449 __ call_VM_leaf(tmp, entry); 2450 2451 __ bind(*stub->continuation()); 2452 } 2453 2454 2455 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2456 if (dest->is_single_cpu()) { 2457 #ifdef _LP64 2458 if (left->type() == T_OBJECT) { 2459 switch (code) { 2460 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; 2461 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break; 2462 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2463 default: ShouldNotReachHere(); 2464 } 2465 } else 2466 #endif 2467 switch (code) { 2468 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; 2469 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; 2470 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; 2471 default: ShouldNotReachHere(); 2472 } 2473 } else { 2474 #ifdef _LP64 2475 switch (code) { 2476 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2477 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2478 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; 2479 default: ShouldNotReachHere(); 2480 } 2481 #else 2482 switch (code) { 2483 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2484 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2485 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; 2486 default: ShouldNotReachHere(); 2487 } 2488 #endif 2489 } 2490 } 2491 2492 2493 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2494 #ifdef _LP64 2495 if (left->type() == T_OBJECT) { 2496 count = count & 63; // shouldn't shift by more than sizeof(intptr_t) 2497 Register l = left->as_register(); 2498 Register d = dest->as_register_lo(); 2499 switch (code) { 2500 case lir_shl: __ sllx (l, count, d); break; 2501 case lir_shr: __ srax (l, count, d); break; 2502 case lir_ushr: __ srlx (l, count, d); break; 2503 default: ShouldNotReachHere(); 2504 } 2505 return; 2506 } 2507 #endif 2508 2509 if (dest->is_single_cpu()) { 2510 count = count & 0x1F; // Java spec 2511 switch (code) { 2512 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break; 2513 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break; 2514 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break; 2515 default: ShouldNotReachHere(); 2516 } 2517 } else if (dest->is_double_cpu()) { 2518 count = count & 63; // Java spec 2519 switch (code) { 2520 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2521 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2522 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break; 2523 default: ShouldNotReachHere(); 2524 } 2525 } else { 2526 ShouldNotReachHere(); 2527 } 2528 } 2529 2530 2531 void 
LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2532 assert(op->tmp1()->as_register() == G1 && 2533 op->tmp2()->as_register() == G3 && 2534 op->tmp3()->as_register() == G4 && 2535 op->obj()->as_register() == O0 && 2536 op->klass()->as_register() == G5, "must be"); 2537 if (op->init_check()) { 2538 __ ldub(op->klass()->as_register(), 2539 in_bytes(InstanceKlass::init_state_offset()), 2540 op->tmp1()->as_register()); 2541 add_debug_info_for_null_check_here(op->stub()->info()); 2542 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized); 2543 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry()); 2544 __ delayed()->nop(); 2545 } 2546 __ allocate_object(op->obj()->as_register(), 2547 op->tmp1()->as_register(), 2548 op->tmp2()->as_register(), 2549 op->tmp3()->as_register(), 2550 op->header_size(), 2551 op->object_size(), 2552 op->klass()->as_register(), 2553 *op->stub()->entry()); 2554 __ bind(*op->stub()->continuation()); 2555 __ verify_oop(op->obj()->as_register()); 2556 } 2557 2558 2559 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2560 assert(op->tmp1()->as_register() == G1 && 2561 op->tmp2()->as_register() == G3 && 2562 op->tmp3()->as_register() == G4 && 2563 op->tmp4()->as_register() == O1 && 2564 op->klass()->as_register() == G5, "must be"); 2565 2566 LP64_ONLY( __ signx(op->len()->as_register()); ) 2567 if (UseSlowPath || 2568 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2569 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2570 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 2571 __ delayed()->nop(); 2572 } else { 2573 __ allocate_array(op->obj()->as_register(), 2574 op->len()->as_register(), 2575 op->tmp1()->as_register(), 2576 op->tmp2()->as_register(), 2577 op->tmp3()->as_register(), 2578 arrayOopDesc::header_size(op->type()), 2579 type2aelembytes(op->type()), 2580 op->klass()->as_register(), 2581 *op->stub()->entry()); 2582 } 2583 __ bind(*op->stub()->continuation()); 2584 } 2585 2586 2587 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2588 ciMethodData *md, ciProfileData *data, 2589 Register recv, Register tmp1, Label* update_done) { 2590 uint i; 2591 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2592 Label next_test; 2593 // See if the receiver is receiver[n]. 
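// (Background, as I understand ReceiverTypeData: row i holds a (receiver klass,
//  count) pair. This first loop bumps the counter of the row whose klass matches
//  recv; the second loop below claims the first empty row for a newly seen klass.)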
2594 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2595 mdo_offset_bias); 2596 __ ld_ptr(receiver_addr, tmp1); 2597 __ verify_klass_ptr(tmp1); 2598 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test); 2599 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2600 mdo_offset_bias); 2601 __ ld_ptr(data_addr, tmp1); 2602 __ add(tmp1, DataLayout::counter_increment, tmp1); 2603 __ st_ptr(tmp1, data_addr); 2604 __ ba(*update_done); 2605 __ delayed()->nop(); 2606 __ bind(next_test); 2607 } 2608 2609 // Didn't find receiver; find next empty slot and fill it in 2610 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2611 Label next_test; 2612 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 2613 mdo_offset_bias); 2614 __ ld_ptr(recv_addr, tmp1); 2615 __ br_notnull_short(tmp1, Assembler::pt, next_test); 2616 __ st_ptr(recv, recv_addr); 2617 __ set(DataLayout::counter_increment, tmp1); 2618 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - 2619 mdo_offset_bias); 2620 __ ba(*update_done); 2621 __ delayed()->nop(); 2622 __ bind(next_test); 2623 } 2624 } 2625 2626 2627 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2628 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2629 md = method->method_data_or_null(); 2630 assert(md != NULL, "Sanity"); 2631 data = md->bci_to_data(bci); 2632 assert(data != NULL, "need data for checkcast"); 2633 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2634 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2635 // The offset is large so bias the mdo by the base of the slot so 2636 // that the ld can use simm13s to reference the slots of the data 2637 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2638 } 2639 } 2640 2641 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2642 // we always need a stub for the failure case. 
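// (Contract, as inferred from the uses in emit_opTypeCheck below: this helper does
//  not produce a value, it only branches to 'success', 'failure' or 'obj_is_null',
//  and when profiling is enabled it also records the observed receiver type in the
//  method data.)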
2643 CodeStub* stub = op->stub(); 2644 Register obj = op->object()->as_register(); 2645 Register k_RInfo = op->tmp1()->as_register(); 2646 Register klass_RInfo = op->tmp2()->as_register(); 2647 Register dst = op->result_opr()->as_register(); 2648 Register Rtmp1 = op->tmp3()->as_register(); 2649 ciKlass* k = op->klass(); 2650 2651 2652 if (obj == k_RInfo) { 2653 k_RInfo = klass_RInfo; 2654 klass_RInfo = obj; 2655 } 2656 2657 ciMethodData* md; 2658 ciProfileData* data; 2659 int mdo_offset_bias = 0; 2660 if (op->should_profile()) { 2661 ciMethod* method = op->profiled_method(); 2662 assert(method != NULL, "Should have method"); 2663 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2664 2665 Label not_null; 2666 __ br_notnull_short(obj, Assembler::pn, not_null); 2667 Register mdo = k_RInfo; 2668 Register data_val = Rtmp1; 2669 metadata2reg(md->constant_encoding(), mdo); 2670 if (mdo_offset_bias > 0) { 2671 __ set(mdo_offset_bias, data_val); 2672 __ add(mdo, data_val, mdo); 2673 } 2674 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2675 __ ldub(flags_addr, data_val); 2676 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2677 __ stb(data_val, flags_addr); 2678 __ ba(*obj_is_null); 2679 __ delayed()->nop(); 2680 __ bind(not_null); 2681 } else { 2682 __ br_null(obj, false, Assembler::pn, *obj_is_null); 2683 __ delayed()->nop(); 2684 } 2685 2686 Label profile_cast_failure, profile_cast_success; 2687 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 2688 Label *success_target = op->should_profile() ? &profile_cast_success : success; 2689 2690 // patching may screw with our temporaries on sparc, 2691 // so let's do it before loading the class 2692 if (k->is_loaded()) { 2693 metadata2reg(k->constant_encoding(), k_RInfo); 2694 } else { 2695 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2696 } 2697 assert(obj != k_RInfo, "must be different"); 2698 2699 // get object class 2700 // not a safepoint as obj null check happens earlier 2701 __ load_klass(obj, klass_RInfo); 2702 if (op->fast_check()) { 2703 assert_different_registers(klass_RInfo, k_RInfo); 2704 __ cmp(k_RInfo, klass_RInfo); 2705 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target); 2706 __ delayed()->nop(); 2707 } else { 2708 bool need_slow_path = true; 2709 if (k->is_loaded()) { 2710 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) 2711 need_slow_path = false; 2712 // perform the fast part of the checking logic 2713 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, 2714 (need_slow_path ? 
success_target : NULL), 2715 failure_target, NULL, 2716 RegisterOrConstant(k->super_check_offset())); 2717 } else { 2718 // perform the fast part of the checking logic 2719 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, 2720 failure_target, NULL); 2721 } 2722 if (need_slow_path) { 2723 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2724 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2725 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2726 __ delayed()->nop(); 2727 __ cmp(G3, 0); 2728 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2729 __ delayed()->nop(); 2730 // Fall through to success case 2731 } 2732 } 2733 2734 if (op->should_profile()) { 2735 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2736 assert_different_registers(obj, mdo, recv, tmp1); 2737 __ bind(profile_cast_success); 2738 metadata2reg(md->constant_encoding(), mdo); 2739 if (mdo_offset_bias > 0) { 2740 __ set(mdo_offset_bias, tmp1); 2741 __ add(mdo, tmp1, mdo); 2742 } 2743 __ load_klass(obj, recv); 2744 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); 2745 // Jump over the failure case 2746 __ ba(*success); 2747 __ delayed()->nop(); 2748 // Cast failure case 2749 __ bind(profile_cast_failure); 2750 metadata2reg(md->constant_encoding(), mdo); 2751 if (mdo_offset_bias > 0) { 2752 __ set(mdo_offset_bias, tmp1); 2753 __ add(mdo, tmp1, mdo); 2754 } 2755 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2756 __ ld_ptr(data_addr, tmp1); 2757 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2758 __ st_ptr(tmp1, data_addr); 2759 __ ba(*failure); 2760 __ delayed()->nop(); 2761 } 2762 __ ba(*success); 2763 __ delayed()->nop(); 2764 } 2765 2766 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2767 LIR_Code code = op->code(); 2768 if (code == lir_store_check) { 2769 Register value = op->object()->as_register(); 2770 Register array = op->array()->as_register(); 2771 Register k_RInfo = op->tmp1()->as_register(); 2772 Register klass_RInfo = op->tmp2()->as_register(); 2773 Register Rtmp1 = op->tmp3()->as_register(); 2774 2775 __ verify_oop(value); 2776 CodeStub* stub = op->stub(); 2777 // check if it needs to be profiled 2778 ciMethodData* md; 2779 ciProfileData* data; 2780 int mdo_offset_bias = 0; 2781 if (op->should_profile()) { 2782 ciMethod* method = op->profiled_method(); 2783 assert(method != NULL, "Should have method"); 2784 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2785 } 2786 Label profile_cast_success, profile_cast_failure, done; 2787 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 2788 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry(); 2789 2790 if (op->should_profile()) { 2791 Label not_null; 2792 __ br_notnull_short(value, Assembler::pn, not_null); 2793 Register mdo = k_RInfo; 2794 Register data_val = Rtmp1; 2795 metadata2reg(md->constant_encoding(), mdo); 2796 if (mdo_offset_bias > 0) { 2797 __ set(mdo_offset_bias, data_val); 2798 __ add(mdo, data_val, mdo); 2799 } 2800 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); 2801 __ ldub(flags_addr, data_val); 2802 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); 2803 __ stb(data_val, flags_addr); 2804 __ ba_short(done); 2805 __ bind(not_null); 2806 } else { 2807 __ br_null_short(value, Assembler::pn, done); 2808 } 2809 add_debug_info_for_null_check_here(op->info_for_exception()); 2810 __ load_klass(array, k_RInfo); 2811 __ load_klass(value, klass_RInfo); 2812 2813 // get instance klass 2814 __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo); 2815 // perform the fast part of the checking logic 2816 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); 2817 2818 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 2819 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); 2820 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); 2821 __ delayed()->nop(); 2822 __ cmp(G3, 0); 2823 __ br(Assembler::equal, false, Assembler::pn, *failure_target); 2824 __ delayed()->nop(); 2825 // fall through to the success case 2826 2827 if (op->should_profile()) { 2828 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2829 assert_different_registers(value, mdo, recv, tmp1); 2830 __ bind(profile_cast_success); 2831 metadata2reg(md->constant_encoding(), mdo); 2832 if (mdo_offset_bias > 0) { 2833 __ set(mdo_offset_bias, tmp1); 2834 __ add(mdo, tmp1, mdo); 2835 } 2836 __ load_klass(value, recv); 2837 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2838 __ ba_short(done); 2839 // Cast failure case 2840 __ bind(profile_cast_failure); 2841 metadata2reg(md->constant_encoding(), mdo); 2842 if (mdo_offset_bias > 0) { 2843 __ set(mdo_offset_bias, tmp1); 2844 __ add(mdo, tmp1, mdo); 2845 } 2846 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2847 __ ld_ptr(data_addr, tmp1); 2848 __ sub(tmp1, DataLayout::counter_increment, tmp1); 2849 __ st_ptr(tmp1, data_addr); 2850 __ ba(*stub->entry()); 2851 __ delayed()->nop(); 2852 } 2853 __ bind(done); 2854 } else if (code == lir_checkcast) { 2855 Register obj = op->object()->as_register(); 2856 Register dst = op->result_opr()->as_register(); 2857 Label success; 2858 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2859 __ bind(success); 2860 __ mov(obj, dst); 2861 } else if (code == lir_instanceof) { 2862 Register obj = op->object()->as_register(); 2863 Register dst = op->result_opr()->as_register(); 2864 Label success, failure, done; 2865 emit_typecheck_helper(op, &success, &failure, &failure); 2866 __ bind(failure); 2867 __ set(0, dst); 2868 __ ba_short(done); 2869 __ bind(success); 2870 __ set(1, dst); 2871 __ bind(done); 2872 } else { 2873 ShouldNotReachHere(); 2874 } 2875 2876 } 2877 2878 2879 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2880 if (op->code() == lir_cas_long) { 2881 assert(VM_Version::supports_cx8(), "wrong machine"); 2882 Register addr = 
op->addr()->as_pointer_register(); 2883 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2884 Register cmp_value_hi = op->cmp_value()->as_register_hi(); 2885 Register new_value_lo = op->new_value()->as_register_lo(); 2886 Register new_value_hi = op->new_value()->as_register_hi(); 2887 Register t1 = op->tmp1()->as_register(); 2888 Register t2 = op->tmp2()->as_register(); 2889 #ifdef _LP64 2890 __ mov(cmp_value_lo, t1); 2891 __ mov(new_value_lo, t2); 2892 // perform the compare and swap operation 2893 __ casx(addr, t1, t2); 2894 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2895 // overwritten with the original value in "addr" and will be equal to t1. 2896 __ cmp(t1, t2); 2897 #else 2898 // move high and low halves of long values into single registers 2899 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg 2900 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half 2901 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value 2902 __ sllx(new_value_hi, 32, t2); 2903 __ srl(new_value_lo, 0, new_value_lo); 2904 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap 2905 // perform the compare and swap operation 2906 __ casx(addr, t1, t2); 2907 // generate condition code - if the swap succeeded, t2 ("new value" reg) was 2908 // overwritten with the original value in "addr" and will be equal to t1. 2909 // Produce icc flag for 32bit. 2910 __ sub(t1, t2, t2); 2911 __ srlx(t2, 32, t1); 2912 __ orcc(t2, t1, G0); 2913 #endif 2914 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2915 Register addr = op->addr()->as_pointer_register(); 2916 Register cmp_value = op->cmp_value()->as_register(); 2917 Register new_value = op->new_value()->as_register(); 2918 Register t1 = op->tmp1()->as_register(); 2919 Register t2 = op->tmp2()->as_register(); 2920 __ mov(cmp_value, t1); 2921 __ mov(new_value, t2); 2922 if (op->code() == lir_cas_obj) { 2923 if (UseCompressedOops) { 2924 __ encode_heap_oop(t1); 2925 __ encode_heap_oop(t2); 2926 __ cas(addr, t1, t2); 2927 } else { 2928 __ cas_ptr(addr, t1, t2); 2929 } 2930 } else { 2931 __ cas(addr, t1, t2); 2932 } 2933 __ cmp(t1, t2); 2934 } else { 2935 Unimplemented(); 2936 } 2937 } 2938 2939 void LIR_Assembler::set_24bit_FPU() { 2940 Unimplemented(); 2941 } 2942 2943 2944 void LIR_Assembler::reset_FPU() { 2945 Unimplemented(); 2946 } 2947 2948 2949 void LIR_Assembler::breakpoint() { 2950 __ breakpoint_trap(); 2951 } 2952 2953 2954 void LIR_Assembler::push(LIR_Opr opr) { 2955 Unimplemented(); 2956 } 2957 2958 2959 void LIR_Assembler::pop(LIR_Opr opr) { 2960 Unimplemented(); 2961 } 2962 2963 2964 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2965 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2966 Register dst = dst_opr->as_register(); 2967 Register reg = mon_addr.base(); 2968 int offset = mon_addr.disp(); 2969 // compute pointer to BasicLock 2970 if (mon_addr.is_simm13()) { 2971 __ add(reg, offset, dst); 2972 } else { 2973 __ set(offset, dst); 2974 __ add(dst, reg, dst); 2975 } 2976 } 2977 2978 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 2979 fatal("CRC32 intrinsic is not implemented on this platform"); 2980 } 2981 2982 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2983 Register obj = op->obj_opr()->as_register(); 2984 Register hdr = op->hdr_opr()->as_register(); 2985 Register lock = op->lock_opr()->as_register(); 2986 2987 // obj may not be an oop 2988 if (op->code() == lir_lock) { 2989 
MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); 2990 if (UseFastLocking) { 2991 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2992 // add debug info for NullPointerException only if one is possible 2993 if (op->info() != NULL) { 2994 add_debug_info_for_null_check_here(op->info()); 2995 } 2996 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2997 } else { 2998 // always do slow locking 2999 // note: the slow locking code could be inlined here, however if we use 3000 // slow locking, speed doesn't matter anyway and this solution is 3001 // simpler and requires less duplicated code - additionally, the 3002 // slow locking code is the same in either case which simplifies 3003 // debugging 3004 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 3005 __ delayed()->nop(); 3006 } 3007 } else { 3008 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 3009 if (UseFastLocking) { 3010 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3011 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3012 } else { 3013 // always do slow unlocking 3014 // note: the slow unlocking code could be inlined here, however if we use 3015 // slow unlocking, speed doesn't matter anyway and this solution is 3016 // simpler and requires less duplicated code - additionally, the 3017 // slow unlocking code is the same in either case which simplifies 3018 // debugging 3019 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); 3020 __ delayed()->nop(); 3021 } 3022 } 3023 __ bind(*op->stub()->continuation()); 3024 } 3025 3026 3027 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3028 ciMethod* method = op->profiled_method(); 3029 int bci = op->profiled_bci(); 3030 ciMethod* callee = op->profiled_callee(); 3031 3032 // Update counter for all call types 3033 ciMethodData* md = method->method_data_or_null(); 3034 assert(md != NULL, "Sanity"); 3035 ciProfileData* data = md->bci_to_data(bci); 3036 assert(data->is_CounterData(), "need CounterData for calls"); 3037 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3038 Register mdo = op->mdo()->as_register(); 3039 #ifdef _LP64 3040 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 3041 Register tmp1 = op->tmp1()->as_register_lo(); 3042 #else 3043 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 3044 Register tmp1 = op->tmp1()->as_register(); 3045 #endif 3046 metadata2reg(md->constant_encoding(), mdo); 3047 int mdo_offset_bias = 0; 3048 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + 3049 data->size_in_bytes())) { 3050 // The offset is large so bias the mdo by the base of the slot so 3051 // that the ld can use simm13s to reference the slots of the data 3052 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 3053 __ set(mdo_offset_bias, O7); 3054 __ add(mdo, O7, mdo); 3055 } 3056 3057 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 3058 Bytecodes::Code bc = method->java_code_at_bci(bci); 3059 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 3060 // Perform additional virtual call profiling for invokevirtual and 3061 // invokeinterface bytecodes 3062 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3063 !callee_is_static && // required for 
optimized MH invokes 3064 C1ProfileVirtualCalls) { 3065 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3066 Register recv = op->recv()->as_register(); 3067 assert_different_registers(mdo, tmp1, recv); 3068 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3069 ciKlass* known_klass = op->known_holder(); 3070 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3071 // We know the type that will be seen at this call site; we can 3072 // statically update the MethodData* rather than needing to do 3073 // dynamic tests on the receiver type 3074 3075 // NOTE: we should probably put a lock around this search to 3076 // avoid collisions by concurrent compilations 3077 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3078 uint i; 3079 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3080 ciKlass* receiver = vc_data->receiver(i); 3081 if (known_klass->equals(receiver)) { 3082 Address data_addr(mdo, md->byte_offset_of_slot(data, 3083 VirtualCallData::receiver_count_offset(i)) - 3084 mdo_offset_bias); 3085 __ ld_ptr(data_addr, tmp1); 3086 __ add(tmp1, DataLayout::counter_increment, tmp1); 3087 __ st_ptr(tmp1, data_addr); 3088 return; 3089 } 3090 } 3091 3092 // Receiver type not found in profile data; select an empty slot 3093 3094 // Note that this is less efficient than it should be because it 3095 // always does a write to the receiver part of the 3096 // VirtualCallData rather than just the first time 3097 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3098 ciKlass* receiver = vc_data->receiver(i); 3099 if (receiver == NULL) { 3100 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - 3101 mdo_offset_bias); 3102 metadata2reg(known_klass->constant_encoding(), tmp1); 3103 __ st_ptr(tmp1, recv_addr); 3104 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - 3105 mdo_offset_bias); 3106 __ ld_ptr(data_addr, tmp1); 3107 __ add(tmp1, DataLayout::counter_increment, tmp1); 3108 __ st_ptr(tmp1, data_addr); 3109 return; 3110 } 3111 } 3112 } else { 3113 __ load_klass(recv, recv); 3114 Label update_done; 3115 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 3116 // Receiver did not match any saved receiver and there is no empty row for it. 3117 // Increment total counter to indicate polymorphic case. 
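// (The increment is the usual ld/add/st sequence since there is no
//  memory-increment instruction here; this is the same counter cell that the
//  static-call path below bumps.)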
3118 __ ld_ptr(counter_addr, tmp1); 3119 __ add(tmp1, DataLayout::counter_increment, tmp1); 3120 __ st_ptr(tmp1, counter_addr); 3121 3122 __ bind(update_done); 3123 } 3124 } else { 3125 // Static call 3126 __ ld_ptr(counter_addr, tmp1); 3127 __ add(tmp1, DataLayout::counter_increment, tmp1); 3128 __ st_ptr(tmp1, counter_addr); 3129 } 3130 } 3131 3132 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3133 Register obj = op->obj()->as_register(); 3134 Register tmp1 = op->tmp()->as_pointer_register(); 3135 Register tmp2 = G1; 3136 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3137 ciKlass* exact_klass = op->exact_klass(); 3138 intptr_t current_klass = op->current_klass(); 3139 bool not_null = op->not_null(); 3140 bool no_conflict = op->no_conflict(); 3141 3142 Label update, next, none; 3143 3144 bool do_null = !not_null; 3145 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3146 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3147 3148 assert(do_null || do_update, "why are we here?"); 3149 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3150 3151 __ verify_oop(obj); 3152 3153 if (tmp1 != obj) { 3154 __ mov(obj, tmp1); 3155 } 3156 if (do_null) { 3157 __ br_notnull_short(tmp1, Assembler::pt, update); 3158 if (!TypeEntries::was_null_seen(current_klass)) { 3159 __ ld_ptr(mdo_addr, tmp1); 3160 __ or3(tmp1, TypeEntries::null_seen, tmp1); 3161 __ st_ptr(tmp1, mdo_addr); 3162 } 3163 if (do_update) { 3164 __ ba(next); 3165 __ delayed()->nop(); 3166 } 3167 #ifdef ASSERT 3168 } else { 3169 __ br_notnull_short(tmp1, Assembler::pt, update); 3170 __ stop("unexpect null obj"); 3171 #endif 3172 } 3173 3174 __ bind(update); 3175 3176 if (do_update) { 3177 #ifdef ASSERT 3178 if (exact_klass != NULL) { 3179 Label ok; 3180 __ load_klass(tmp1, tmp1); 3181 metadata2reg(exact_klass->constant_encoding(), tmp2); 3182 __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok); 3183 __ stop("exact klass and actual klass differ"); 3184 __ bind(ok); 3185 } 3186 #endif 3187 3188 Label do_update; 3189 __ ld_ptr(mdo_addr, tmp2); 3190 3191 if (!no_conflict) { 3192 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 3193 if (exact_klass != NULL) { 3194 metadata2reg(exact_klass->constant_encoding(), tmp1); 3195 } else { 3196 __ load_klass(tmp1, tmp1); 3197 } 3198 3199 __ xor3(tmp1, tmp2, tmp1); 3200 __ btst(TypeEntries::type_klass_mask, tmp1); 3201 // klass seen before, nothing to do. The unknown bit may have been 3202 // set already but no need to check. 3203 __ brx(Assembler::zero, false, Assembler::pt, next); 3204 __ delayed()-> 3205 3206 btst(TypeEntries::type_unknown, tmp1); 3207 // already unknown. Nothing to do anymore. 3208 __ brx(Assembler::notZero, false, Assembler::pt, next); 3209 3210 if (TypeEntries::is_type_none(current_klass)) { 3211 __ delayed()->btst(TypeEntries::type_mask, tmp2); 3212 __ brx(Assembler::zero, true, Assembler::pt, do_update); 3213 // first time here. Set profile type. 3214 __ delayed()->or3(tmp2, tmp1, tmp2); 3215 } else { 3216 __ delayed()->nop(); 3217 } 3218 } else { 3219 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3220 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3221 3222 __ btst(TypeEntries::type_unknown, tmp2); 3223 // already unknown. Nothing to do anymore. 
3224 __ brx(Assembler::notZero, false, Assembler::pt, next); 3225 __ delayed()->nop(); 3226 } 3227 3228 // different than before. Cannot keep accurate profile. 3229 __ or3(tmp2, TypeEntries::type_unknown, tmp2); 3230 } else { 3231 // There's a single possible klass at this profile point 3232 assert(exact_klass != NULL, "should be"); 3233 if (TypeEntries::is_type_none(current_klass)) { 3234 metadata2reg(exact_klass->constant_encoding(), tmp1); 3235 __ xor3(tmp1, tmp2, tmp1); 3236 __ btst(TypeEntries::type_klass_mask, tmp1); 3237 __ brx(Assembler::zero, false, Assembler::pt, next); 3238 #ifdef ASSERT 3239 3240 { 3241 Label ok; 3242 __ delayed()->btst(TypeEntries::type_mask, tmp2); 3243 __ brx(Assembler::zero, true, Assembler::pt, ok); 3244 __ delayed()->nop(); 3245 3246 __ stop("unexpected profiling mismatch"); 3247 __ bind(ok); 3248 } 3249 // first time here. Set profile type. 3250 __ or3(tmp2, tmp1, tmp2); 3251 #else 3252 // first time here. Set profile type. 3253 __ delayed()->or3(tmp2, tmp1, tmp2); 3254 #endif 3255 3256 } else { 3257 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3258 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3259 3260 // already unknown. Nothing to do anymore. 3261 __ btst(TypeEntries::type_unknown, tmp2); 3262 __ brx(Assembler::notZero, false, Assembler::pt, next); 3263 __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2); 3264 } 3265 } 3266 3267 __ bind(do_update); 3268 __ st_ptr(tmp2, mdo_addr); 3269 3270 __ bind(next); 3271 } 3272 } 3273 3274 void LIR_Assembler::align_backward_branch_target() { 3275 __ align(OptoLoopAlignment); 3276 } 3277 3278 3279 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 3280 // make sure we are expecting a delay 3281 // this has the side effect of clearing the delay state 3282 // so we can use _masm instead of _masm->delayed() to do the 3283 // code generation. 3284 __ delayed(); 3285 3286 // make sure we only emit one instruction 3287 int offset = code_offset(); 3288 op->delay_op()->emit_code(this); 3289 #ifdef ASSERT 3290 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { 3291 op->delay_op()->print(); 3292 } 3293 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, 3294 "only one instruction can go in a delay slot"); 3295 #endif 3296 3297 // we may also be emitting the call info for the instruction 3298 // which we are the delay slot of. 
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
  }
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}

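// (Background: a volatile Java long must be accessed atomically even on the
//  32-bit VM, so instead of two 32-bit moves the two halves are combined in a
//  G-register and transferred with a single 64-bit ldx/stx below.)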
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg;      // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      // clear the top bits of G5, and scale up G4
      __ srl (src->as_register_lo(), 0, G5);
      __ sllx(src->as_register_hi(), 32, G4);
      // combine the two halves into the 64 bits of G4
      __ or3(G4, G5, G4);
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ stx(G4, base, disp);
      } else {
        __ stx(G4, base, idx);
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ ldx(base, disp, G5);
      } else {
        __ ldx(base, idx, G5);
      }
      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
      __ mov (G5, dest->as_register_lo());     // copy low half into lo
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}

void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}


// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

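// (pack64 and unpack64 are inverses; unpack64 below is, e.g., what the
//  lir_unpack64 op inserted by peephole() for long call returns ultimately
//  relies on.)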
// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs,  0, rd->successor());
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");

  if (Assembler::is_simm13(addr->disp())) {
    __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
  } else {
    __ set(addr->disp(), G3_scratch);
    __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, false, Assembler::pt, ok);
    __ delayed()->nop();
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
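        // Illustrative sketch of the transformation performed below: for a
        // block-local tail such as
        //     add  Rx, Ry, Rz
        //     ba   B2
        // the add is swapped behind the branch and wrapped in a LIR_OpDelay,
        // so it later ends up in the branch's delay slot:
        //     ba   B2
        //     delayed()->add Rx, Ry, Rz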
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

#if defined(TIERED) && !defined(_LP64)
        // fixup the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's
        // such a mismatch between the single reg and double reg
        // calling convention.
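        // (Concretely: the call is re-created below with its result in
        //  FrameMap::g1_long_single_opr, a single 64-bit register, and a
        //  lir_unpack64 op is inserted right after it to split that value
        //  back into the two 32-bit halves of the original result operand.)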
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          for (int a = 0; a < callop->arguments()->length(); a++) {
            arguments->append(callop->arguments()->at(a));
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  LIR_Address* addr = src->as_address_ptr();

  assert(data == dest, "swap uses only 2 operands");
  assert(code == lir_xchg, "no xadd on sparc");

  if (data->type() == T_INT) {
    __ swap(as_Address(addr), data->as_register());
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
#ifdef _LP64
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
#else
    __ swap(as_Address(addr), obj);
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __