1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "code/aotCodeCache.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "runtime/threadIdentifier.hpp"
46 #include "utilities/powerOfTwo.hpp"
47 #include "vmreg_x86.inline.hpp"
48
49
50 // These masks are used to provide 128-bit aligned bitmasks to the XMM
51 // instructions, to allow sign-masking or sign-bit flipping. They allow
52 // fast versions of NegF/NegD and AbsF/AbsD.
53
54 // Note: 'double' and 'long long' have 32-bits alignment on x86.
55 static address double_quadword(jlong *adr, jlong lo, jlong hi) {
56 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
57 // of 128-bits operands for SSE instructions.
58 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
59 // Store the value to a 128-bits operand.
60 operand[0] = lo;
61 operand[1] = hi;
62 return (address)operand;
63 }
64
// Buffer for 128-bit masks used by SSE instructions.
// Sized as 4*128 bits of data plus 128 bits of slack so that
// double_quadword() can align each entry down to a 16-byte boundary.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
// Each pool repeats the same 64-bit pattern twice, so one 128-bit operand
// serves both packed and scalar uses (sign-mask for Abs, sign-flip for Neg).
address LIR_Assembler::float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
address LIR_Assembler::float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
73
74
NEEDS_CLEANUP // remove these definitions?
// Register conventions used by this backend:
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be
78
79 #define __ _masm->
80
81
82 static void select_different_registers(Register preserve,
83 Register extra,
84 Register &tmp1,
85 Register &tmp2) {
86 if (tmp1 == preserve) {
87 assert_different_registers(tmp1, tmp2, extra);
88 tmp1 = extra;
89 } else if (tmp2 == preserve) {
90 assert_different_registers(tmp1, tmp2, extra);
91 tmp2 = extra;
92 }
93 assert_different_registers(preserve, tmp1, tmp2);
94 }
95
96
97
98 static void select_different_registers(Register preserve,
99 Register extra,
100 Register &tmp1,
101 Register &tmp2,
102 Register &tmp3) {
103 if (tmp1 == preserve) {
104 assert_different_registers(tmp1, tmp2, tmp3, extra);
105 tmp1 = extra;
106 } else if (tmp2 == preserve) {
107 assert_different_registers(tmp1, tmp2, tmp3, extra);
108 tmp2 = extra;
109 } else if (tmp3 == preserve) {
110 assert_different_registers(tmp1, tmp2, tmp3, extra);
111 tmp3 = extra;
112 }
113 assert_different_registers(preserve, tmp1, tmp2, tmp3);
114 }
115
116
117
118 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
119 if (opr->is_constant()) {
120 LIR_Const* constant = opr->as_constant_ptr();
121 switch (constant->type()) {
122 case T_INT: {
123 return true;
124 }
125
126 default:
127 return false;
128 }
129 }
130 return false;
131 }
132
133
// The operand that holds the receiver on entry (per the frame map's
// calling convention).
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
137
// The OSR buffer pointer arrives in the same register as the receiver;
// re-type that register as a pointer operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
141
142 //--------------fpu register translations-----------------------
143
144
145 address LIR_Assembler::float_constant(float f) {
146 address const_addr = __ float_constant(f);
147 if (const_addr == nullptr) {
148 bailout("const section overflow");
149 return __ code()->consts()->start();
150 } else {
151 return const_addr;
152 }
153 }
154
155
156 address LIR_Assembler::double_constant(double d) {
157 address const_addr = __ double_constant(d);
158 if (const_addr == nullptr) {
159 bailout("const section overflow");
160 return __ code()->consts()->start();
161 } else {
162 return const_addr;
163 }
164 }
165
// Emit a hardware breakpoint (x86 INT3).
void LIR_Assembler::breakpoint() {
  __ int3();
}
169
// Push an operand onto the native stack. Only the operand kinds this
// backend actually pushes are supported; anything else is a coding error.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // On x86_64 a double-cpu operand lives in one 64-bit register (the "lo" half).
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
191
// Pop the top of the native stack into a single cpu register; no other
// operand kinds are supported.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}
199
// True when the LIR address has neither base nor index register, i.e.
// its displacement is an absolute (literal) address.
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}
203
204 //-------------------------------------------
205
// Convert a LIR address to an x86 Address, using rscratch1 if an
// unreachable literal address must be materialized in a register.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
209
210 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
211 if (addr->base()->is_illegal()) {
212 assert(addr->index()->is_illegal(), "must be illegal too");
213 AddressLiteral laddr((address)addr->disp(), relocInfo::none);
214 if (! __ reachable(laddr)) {
215 __ movptr(tmp, laddr.addr());
216 Address res(tmp, 0);
217 return res;
218 } else {
219 return __ as_Address(laddr);
220 }
221 }
222
223 Register base = addr->base()->as_pointer_register();
224
225 if (addr->index()->is_illegal()) {
226 return Address( base, addr->disp());
227 } else if (addr->index()->is_cpu_register()) {
228 Register index = addr->index()->as_pointer_register();
229 return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
230 } else if (addr->index()->is_constant()) {
231 intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
232 assert(Assembler::is_simm32(addr_offset), "must be");
233
234 return Address(base, addr_offset);
235 } else {
236 Unimplemented();
237 return Address();
238 }
239 }
240
241
// Address of the high word of a two-word operand: the low-word address
// bumped by one machine word.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}
246
247
// The low word lives at the unadjusted address.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
251
252
// Emit the on-stack-replacement entry: build the compiled frame and copy
// the interpreter's monitors out of the OSR buffer into this frame's
// monitor slots. Locals are copied later by code emitted in the IR.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of monitor 0 within the OSR buffer (monitors follow the locals).
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      // Copy the lock word and then the object oop into this frame's
      // monitor slot i; rbx is free to use as a shuttle here.
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
316
317
// inline cache check; done before the frame is built.
// Returns the offset of the verified entry (see MacroAssembler::ic_check).
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
322
// Emit a class-initialization barrier for 'method''s holder: fast path
// skips ahead when the class is initialized (for the current thread);
// otherwise control falls through to the wrong-method stub, which will
// re-resolve and block until initialization completes.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

  // Slow path: bail out to the runtime.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
337
// Load an (initially null) oop constant into 'reg'; the PatchingStub
// records the site so the real oop can be patched in once it is resolved.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
344
// Load an (initially null) klass constant into 'reg'; the PatchingStub
// records the site so the real metadata can be patched in once resolved.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
351
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  // (those are pushed by the call / frame setup, not by the rsp decrement)
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
361
362
// Emit the nmethod's exception-handler stub; returns its code offset,
// or -1 (after recording a bailout) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  // The runtime call does not return here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
389
390
// Emit the code to remove the frame from the stack in the exception
// unwind path.
// Clears the thread's pending-exception state, unlocks the method's
// monitor if it is synchronized, fires the dtrace method-exit probe if
// enabled, then pops the frame and jumps to the unwind stub.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    // Monitor 0 holds the method lock for a synchronized method.
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // dtrace_method_exit(thread, method)
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
443
444
// Emit the nmethod's deopt-handler stub; returns the entry offset, or -1
// (after recording a bailout) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The handler entry is the jmp below, which branches back to the call;
  // this layout keeps enough bytes after the entry for the post-call NOP
  // check (asserted below).
  int entry_offset = __ offset();

  __ jmp(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}
472
// Emit a method return: pop the frame, run the reserved-stack check if
// needed, perform the return-site safepoint poll, then ret.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

  // Record where the poll lands so the stub can map it back to this site.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}
494
495
// Emit an in-method safepoint poll (a read of the thread-local polling
// page) and attach debug info to it; returns the poll's code offset.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  // The faulting load; rax is only a dummy destination for the test.
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  // The signal handler relies on this instruction being exactly 3 bytes.
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
  return offset;
}
509
510
// Register-to-register move; a no-op when source and destination coincide.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}
514
// Exchange the contents of two registers.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
518
519
// Load a constant into a register. Oop and metadata constants may go
// through the patching machinery; other types must not request patching.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#if INCLUDE_CDS
      // When dumping code into the AOT cache, certain well-known runtime
      // addresses must be emitted as relocatable references rather than
      // raw immediates so they can be fixed up at load time.
      if (AOTCodeCache::is_on_for_dump()) {
        address b = c->as_pointer();
        if (b == (address)ThreadIdentifier::unsafe_offset()) {
          __ lea(dest->as_register_lo(), ExternalAddress(b));
          break;
        }
        if (AOTRuntimeConstants::contains(b)) {
          __ load_aotrc_address(dest->as_register_lo(), b);
          break;
        }
      }
#endif
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        // Zero via xorps avoids a constant-table load; skipped on AVX512
        // where the idiom is handled differently upstream — NOTE(review):
        // the UseAVX <= 2 guard's rationale is not visible here, confirm.
        if (UseAVX <= 2 && c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (UseAVX <= 2 && c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
607
// Store a constant into a stack slot. Values are stored as raw bits
// (as_jint_bits / as_jlong_bits), so float/double share the int/long paths.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      // One 64-bit store covers both words of the double-stack slot.
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
      break;

    default:
      ShouldNotReachHere();
  }
}
639
// Store a constant directly to memory. null_check_here tracks the offset
// of the instruction that can fault on a null base, so the debug info
// (when 'info' is given) maps the fault back to this bytecode.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          // A narrow null is just a 32-bit zero store.
          __ movl(as_Address(addr), NULL_WORD);
        } else {
          // movptr with a 64-bit immediate is not available; zero a
          // scratch register and store that instead.
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
          // Materialize the oop in a scratch register, compressing it
          // first when storing a narrow oop.
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
        }
      }
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        // 64-bit immediates cannot be stored directly; go through r10.
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
      break;

    case T_BOOLEAN:  // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:  // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
715
716
// Move a value between registers (cpu<->cpu or xmm<->xmm). Cross-kind
// moves (e.g. cpu<->xmm) are not handled here.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // On x86_64 both halves are the same 64-bit register.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
762
763 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
764 assert(src->is_register(), "should not call otherwise");
765 assert(dest->is_stack(), "should not call otherwise");
766
767 if (src->is_single_cpu()) {
768 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
769 if (is_reference_type(type)) {
770 __ verify_oop(src->as_register());
771 __ movptr (dst, src->as_register());
772 } else if (type == T_METADATA || type == T_ADDRESS) {
773 __ movptr (dst, src->as_register());
774 } else {
775 __ movl (dst, src->as_register());
776 }
777
778 } else if (src->is_double_cpu()) {
779 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
780 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
781 __ movptr (dstLO, src->as_register_lo());
782
783 } else if (src->is_single_xmm()) {
784 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
785 __ movflt(dst_addr, src->as_xmm_float_reg());
786
787 } else if (src->is_double_xmm()) {
788 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
789 __ movdbl(dst_addr, src->as_xmm_double_reg());
790
791 } else {
792 ShouldNotReachHere();
793 }
794 }
795
796
// Store a register to memory, optionally through a field-access patching
// stub. null_check_here records the offset of the (possibly faulting)
// store so debug info can be attached when 'info' is supplied.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
    if (UseCompressedOops && !wide) {
      // Compress into a scratch register so the original oop stays intact.
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    // Patched accesses must have a non-zero displacement to rewrite.
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      break;
    }

    case T_DOUBLE: {
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      // from_hi equals from_lo on x86_64 and is unused here.
      Register from_hi = src->as_register_hi();
      __ movptr(as_Address_lo(to_addr), from_lo);
      break;
    }

    case T_BYTE:     // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:  // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
880
881
882 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
883 assert(src->is_stack(), "should not call otherwise");
884 assert(dest->is_register(), "should not call otherwise");
885
886 if (dest->is_single_cpu()) {
887 if (is_reference_type(type)) {
888 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
889 __ verify_oop(dest->as_register());
890 } else if (type == T_METADATA || type == T_ADDRESS) {
891 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
892 } else {
893 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
894 }
895
896 } else if (dest->is_double_cpu()) {
897 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
898 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
899 __ movptr(dest->as_register_lo(), src_addr_LO);
900
901 } else if (dest->is_single_xmm()) {
902 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
903 __ movflt(dest->as_xmm_float_reg(), src_addr);
904
905 } else if (dest->is_double_xmm()) {
906 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
907 __ movdbl(dest->as_xmm_double_reg(), src_addr);
908
909 } else {
910 ShouldNotReachHere();
911 }
912 }
913
914
// Copy one stack slot to another without clobbering any register:
// pointer-sized copies go through push/pop, 32-bit copies through rscratch1.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
    }

  } else if (src->is_double_stack()) {
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));

  } else {
    ShouldNotReachHere();
  }
}
934
935
// Memory -> register move.  Loads a value of the given BasicType from the
// address operand `src` into the register operand `dest`.
//   patch_code - if not lir_patch_none, the field offset is not yet known
//                (holder class unresolved) and a PatchingStub is emitted so
//                the displacement can be patched at runtime.
//   info       - when non-null, debug info is recorded so a fault on this
//                access is handled as an implicit null check.
//   wide       - for oop loads: true forces the full-width (uncompressed)
//                load even when compressed oops are enabled.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  // If the base is an object, sanity-check it in debug builds.
  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  // From here on, the next instruction emitted is both the patchable
  // instruction and the implicit-null-check PC, so the stub and debug info
  // must be created before the load below.
  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_OBJECT: // fall through
    case T_ARRAY:  // fall through
      if (UseCompressedOops && !wide) {
        // Narrow oop: 32-bit load; decoded to a full oop after the switch.
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      // The entire 64-bit value is loaded into to_lo; to_hi is unused here.
      __ movptr(to_lo, as_Address_lo(addr));
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        // Single-instruction sign-extending load; also required when the
        // address uses dest_reg (it could not be pre-cleared above).
        __ movsbl(dest_reg, from_addr);
      } else {
        // Pre-P6: dest_reg was cleared above; load the byte and
        // sign-extend manually to avoid a partial-register stall.
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        // Pre-P6: upper bits were already zeroed above, and char is
        // unsigned, so a plain 16-bit load suffices.
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        // Pre-P6: load and sign-extend manually via shift pair.
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      // Expand the narrow oop loaded above into a full heap pointer.
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}
1068
1069
1070 NEEDS_CLEANUP; // This could be static?
1071 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1072 int elem_size = type2aelembytes(type);
1073 switch (elem_size) {
1074 case 1: return Address::times_1;
1075 case 2: return Address::times_2;
1076 case 4: return Address::times_4;
1077 case 8: return Address::times_8;
1078 }
1079 ShouldNotReachHere();
1080 return Address::no_scale;
1081 }
1082
1083
1084 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1085 switch (op->code()) {
1086 case lir_idiv:
1087 case lir_irem:
1088 arithmetic_idiv(op->code(),
1089 op->in_opr1(),
1090 op->in_opr2(),
1091 op->in_opr3(),
1092 op->result_opr(),
1093 op->info());
1094 break;
1095 case lir_fmad:
1096 __ fmad(op->result_opr()->as_xmm_double_reg(),
1097 op->in_opr1()->as_xmm_double_reg(),
1098 op->in_opr2()->as_xmm_double_reg(),
1099 op->in_opr3()->as_xmm_double_reg());
1100 break;
1101 case lir_fmaf:
1102 __ fmaf(op->result_opr()->as_xmm_float_reg(),
1103 op->in_opr1()->as_xmm_float_reg(),
1104 op->in_opr2()->as_xmm_float_reg(),
1105 op->in_opr3()->as_xmm_float_reg());
1106 break;
1107 default: ShouldNotReachHere(); break;
1108 }
1109 }
1110
// Emits a conditional or unconditional branch.  For float branches the
// flags come from an unordered floating-point compare, so an unordered
// result (parity flag set) first dispatches to the unordered successor
// block, and the ordered comparison uses unsigned condition codes.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  // Record targets so label binding can be cross-checked in debug builds.
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr) _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    // Unconditional jump; attach debug info if present.
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      // NaN operand(s) set PF: go to the unordered successor first.
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      // Map LIR conditions onto the unsigned flavors produced by the
      // floating-point compare.
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                         ShouldNotReachHere();
      }
    } else {
      // Integer/pointer compare: signed conditions, plus the two explicit
      // unsigned forms (belowEqual/aboveEqual).
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                         ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
1151
1152 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1153 LIR_Opr src = op->in_opr();
1154 LIR_Opr dest = op->result_opr();
1155
1156 switch (op->bytecode()) {
1157 case Bytecodes::_i2l:
1158 __ movl2ptr(dest->as_register_lo(), src->as_register());
1159 break;
1160
1161 case Bytecodes::_l2i:
1162 __ movl(dest->as_register(), src->as_register_lo());
1163 break;
1164
1165 case Bytecodes::_i2b:
1166 move_regs(src->as_register(), dest->as_register());
1167 __ sign_extend_byte(dest->as_register());
1168 break;
1169
1170 case Bytecodes::_i2c:
1171 move_regs(src->as_register(), dest->as_register());
1172 __ andl(dest->as_register(), 0xFFFF);
1173 break;
1174
1175 case Bytecodes::_i2s:
1176 move_regs(src->as_register(), dest->as_register());
1177 __ sign_extend_short(dest->as_register());
1178 break;
1179
1180 case Bytecodes::_f2d:
1181 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1182 break;
1183
1184 case Bytecodes::_d2f:
1185 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1186 break;
1187
1188 case Bytecodes::_i2f:
1189 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1190 break;
1191
1192 case Bytecodes::_i2d:
1193 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1194 break;
1195
1196 case Bytecodes::_l2f:
1197 __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
1198 break;
1199
1200 case Bytecodes::_l2d:
1201 __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
1202 break;
1203
1204 case Bytecodes::_f2i:
1205 __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
1206 break;
1207
1208 case Bytecodes::_d2i:
1209 __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
1210 break;
1211
1212 case Bytecodes::_f2l:
1213 __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
1214 break;
1215
1216 case Bytecodes::_d2l:
1217 __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
1218 break;
1219
1220 default: ShouldNotReachHere();
1221 }
1222 }
1223
1224 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1225 if (op->init_check()) {
1226 add_debug_info_for_null_check_here(op->stub()->info());
1227 // init_state needs acquire, but x86 is TSO, and so we are already good.
1228 __ cmpb(Address(op->klass()->as_register(),
1229 InstanceKlass::init_state_offset()),
1230 InstanceKlass::fully_initialized);
1231 __ jcc(Assembler::notEqual, *op->stub()->entry());
1232 }
1233 __ allocate_object(op->obj()->as_register(),
1234 op->tmp1()->as_register(),
1235 op->tmp2()->as_register(),
1236 op->header_size(),
1237 op->object_size(),
1238 op->klass()->as_register(),
1239 *op->stub()->entry());
1240 __ bind(*op->stub()->continuation());
1241 }
1242
// Emits inline allocation of an array.  The length is sign-extended to
// 64 bits first.  If fast inline allocation is disabled for this array
// kind, everything is delegated to the slow-path stub.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ movslq(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Ensure len is distinct from the two temps handed to allocate_array:
    // if len aliases tmp1 or tmp2, retarget that temp to tmp3; otherwise
    // copy len into tmp3.
    // NOTE(review): the final copy presumably keeps the length available in
    // tmp3 across the fast path for the slow-path stub — confirm against
    // the stub's register expectations.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  // Fast path and stub return both continue here.
  __ bind(*op->stub()->continuation());
}
1276
1277 void LIR_Assembler::type_profile_helper(Register mdo,
1278 ciMethodData *md, ciProfileData *data,
1279 Register recv) {
1280 int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1281 __ profile_receiver_type(recv, mdo, mdp_offset);
1282 }
1283
// Shared subtype-test emitter for checkcast/instanceof.  Tests whether the
// object in op->object() is an instance of op->klass() and branches to
// `success`, `failure`, or `obj_is_null` (the labels may alias each other,
// as they do for checkcast).  Also emits receiver-type profiling when
// op->should_profile() is set.  The result register `dst` is not written
// here; the caller materializes the result after the branch.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = rscratch1;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  // Resolve register aliasing: if obj shares a register with one of the
  // temps, retarget that temp onto dst (dst is only written by the caller
  // after this helper, so it is free as a scratch register here).
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    // The extra temp Rtmp1 is needed when the klass pointer must be
    // loaded/decoded before comparing.
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  // Null check; with profiling, a null receiver also updates the MDO's
  // null_seen flag before exiting through obj_is_null.
  __ testptr(obj, obj);
  if (op->should_profile()) {
    Label not_null;
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
    int header_bits = BitData::null_seen_byte_constant();
    __ orb(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);

    // Record the receiver's klass in the MDO.
    Register recv = k_RInfo;
    __ load_klass(recv, obj, tmp_load_klass);
    type_profile_helper(mdo, md, data, recv);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  // Materialize the target klass in k_RInfo, emitting a patch site if the
  // klass is not resolved at compile time.
  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Fast check: a direct klass-pointer equality test is sufficient.
    // get object class
    // not a safepoint as obj null check happens earlier
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj, tmp_load_klass);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj, tmp_load_klass);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // super_check_offset addresses a display slot: hit or miss here is
        // definitive.
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // super_check_offset addresses the secondary-super cache: a hit is
        // definitive, a miss needs the full (out-of-line) subtype check.
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
        __ cmpptr(klass_RInfo, k_RInfo);
        __ jcc(Assembler::equal, *success_target);

        // Slow subtype check: arguments passed on the stack; the stub
        // leaves its boolean result in an argument slot, recovered by the
        // second pop below.
        __ push_ppx(klass_RInfo);
        __ push_ppx(k_RInfo);
        __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
        __ pop_ppx(klass_RInfo);
        __ pop_ppx(klass_RInfo);
        // result is a boolean
        __ testl(klass_RInfo, klass_RInfo);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // Unresolved klass: generic fast path first, then the out-of-line
      // slow path for the undecided cases.
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push_ppx(klass_RInfo);
      __ push_ppx(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(klass_RInfo);
      __ pop_ppx(k_RInfo);
      // result is a boolean
      __ testl(k_RInfo, k_RInfo);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ jmp(*success);
}
1408
1409
// Emits a full type-check operation: array store check (lir_store_check),
// checkcast, or instanceof.  checkcast/instanceof delegate the subtype
// test to emit_typecheck_helper and then materialize the result register.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  Register tmp_load_klass = rscratch1;
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = nullptr;
    ciProfileData* data = nullptr;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    // Storing null is always allowed; with profiling, also record the
    // null_seen flag in the MDO.
    __ testptr(value, value);
    if (op->should_profile()) {
      Label not_null;
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);

      // Record the stored value's klass in the MDO.
      Register recv = k_RInfo;
      __ load_klass(recv, value, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array, tmp_load_klass);
    __ load_klass(klass_RInfo, value, tmp_load_klass);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    // arguments passed on the stack; result recovered by the pops.
    __ push_ppx(klass_RInfo);
    __ push_ppx(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    __ pop_ppx(klass_RInfo);
    __ pop_ppx(k_RInfo);
    // result is a boolean
    __ testl(k_RInfo, k_RInfo);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      // For checkcast, null and success both fall through; failure takes
      // the stub (which throws).
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        // For instanceof, null counts as failure (result 0).
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}
1507
1508
// Emits an atomic compare-and-swap.  The expected value must already be in
// rax (cmpxchg's implicit accumulator operand); afterwards rax holds the
// value found in memory and ZF reports success.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        // Compress both oops; newval is compressed in rscratch1 so the
        // caller's register is left untouched.
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        __ lock();
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else {
        __ lock();
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      __ lock();
      __ cmpxchgl(newval, Address(addr, 0));
    }
  } else if (op->code() == lir_cas_long) {
    // 64-bit CAS; same register contract as above, with the long values in
    // the lo registers.
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    __ lock();
    __ cmpxchgq(newval, Address(addr, 0));
  } else {
    Unimplemented();
  }
}
1552
// Emits a conditional move: result = condition ? opr1 : opr2.  The flags
// were established by a preceding compare.  opr1 is moved into result
// unconditionally; then, if the condition does NOT hold (ncond), result is
// overwritten with opr2 — branch-free via cmov when available, otherwise
// with a short branch around the opr2 move.
// NOTE(review): the opr1 move emitted before the cmov/jcc is assumed not
// to clobber the condition flags — confirm for the const2reg path.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");

  // acond = condition holds; ncond = its negation (used for the cmov).
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  // Unconditionally move opr1 into result first.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
    } else {
      ShouldNotReachHere();
    }

  } else {
    // Branching version: skip the opr2 move when the condition holds.
    Label skip;
    __ jccb(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
1613
1614
// Emits a two-operand arithmetic operation (add/sub/mul/div depending on
// operand class).  x86 uses two-address form, so left == dest is asserted
// for every operand class.  `info` must be null: trapping idiv/irem go
// through arithmetic_idiv instead.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    // 32-bit integer in a general-purpose register.
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      // increment/decrement helpers pick the best encoding for the value.
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    // 64-bit integer; the value lives entirely in the lo register
    // (lreg_hi is unused on x86-64).
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ imulq(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant: materialize the 64-bit immediate in r10.
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    // Single-precision float in an XMM register.
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg); break;
        case lir_sub: __ subss(lreg, rreg); break;
        case lir_mul: __ mulss(lreg, rreg); break;
        case lir_div: __ divss(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else {
      // Right operand is in memory: a stack slot or a constant-pool entry.
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr); break;
        case lir_sub: __ subss(lreg, raddr); break;
        case lir_mul: __ mulss(lreg, raddr); break;
        case lir_div: __ divss(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    // Double-precision float in an XMM register.
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg); break;
        case lir_sub: __ subsd(lreg, rreg); break;
        case lir_mul: __ mulsd(lreg, rreg); break;
        case lir_div: __ divsd(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else {
      // Right operand is in memory: a stack slot or a constant-pool entry.
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr); break;
        case lir_sub: __ subsd(lreg, raddr); break;
        case lir_mul: __ mulsd(lreg, raddr); break;
        case lir_div: __ divsd(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_single_stack() || left->is_address()) {
    // Destination is a memory location: operate directly on memory.
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
1807
1808
// Emits math intrinsics: abs/sqrt on double XMM values, plus the
// float <-> half-float (Float16) conversions.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          assert(!tmp->is_valid(), "do not need temporary");
          // abs(x) = clear the sign bit: AND with a 128-bit aligned mask
          // that has every bit except the sign bit set.
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress(LIR_Assembler::double_signmask_pool),
                   rscratch1);
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (code == lir_f2hf) {
    __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
  } else if (code == lir_hf2f) {
    __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
  } else {
    Unimplemented();
  }
}
1837
// Emits a bitwise and/or/xor of `left` and `right`.  The operation is
// performed in place on `left` (two-address form) and the result is then
// moved into `dst` if they differ.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");
  if (left->is_single_cpu()) {
    // 32-bit case.
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl (reg, val); break;
        case lir_logic_or:  __ orl  (reg, val); break;
        case lir_logic_xor: __ xorl (reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl (reg, raddr); break;
        case lir_logic_or:  __ orl  (reg, raddr); break;
        case lir_logic_xor: __ xorl (reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr (reg, rright); break;
        case lir_logic_or : __ orptr  (reg, rright); break;
        case lir_logic_xor: __ xorptr (reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    // 64-bit case; the value lives entirely in the lo register (the *_hi
    // registers are unused on x86-64).
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
      // Materialize the 64-bit immediate in the scratch register first.
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register r_lo;
      // References are single-register operands; longs use the lo register.
      if (is_reference_type(right->type())) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

    move_regs(l_lo, dst_lo);
  }
}
1913
1914
// Emits 32-bit integer division (lir_idiv) or remainder (lir_irem).
// we assume that rax, and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  // assert(left->destroys_register(), "check");
  // assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Constant power-of-two divisor: use shift/mask tricks instead of idiv.
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      if (divisor == 2) {
        // rdx is 0 for non-negative, -1 for negative input; subtracting it
        // adds the +1 bias needed so the shift rounds toward zero.
        __ subl(lreg, rdx);
      } else {
        // Add (divisor - 1) to negative inputs only (rdx is all-ones then)
        // so the arithmetic shift rounds toward zero.
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2i_exact(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      // Remainder by 2^k: keep the sign bit and the low k bits, then fix
      // up negative inputs so the result carries the dividend's sign.
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    // General case: hardware idiv with the fixed rax/rdx register pair.
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    // corrected_idivl presumably handles the min_jint / -1 overflow case;
    // it returns the offset of the idivl instruction so a hardware div0
    // fault there can be mapped to the right bytecode.
    int idivl_offset = __ corrected_idivl(rreg);
    if (ImplicitDiv0Checks) {
      add_debug_info_for_div0(idivl_offset, info);
    }
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
1974
1975
// Emit a compare of opr1 against opr2 that sets the condition codes for a
// subsequent branch or cmove. The LIR condition itself is not emitted here;
// it is only used to assert the restricted cases (metadata/oop constants
// support equal/notEqual only). op supplies debug info for implicit null
// checks when opr2 is a memory operand.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        jint i = c->as_jint();
        if (i == 0) {
          // test reg,reg sets the same flags as cmp reg,0 with a shorter encoding
          __ testl(reg1, reg1);
        } else {
          __ cmpl(reg1, i);
        }
      } else if (c->type() == T_METADATA) {
        // All we need for now is a comparison with null for equality.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          __ cmpoop(reg1, o, rscratch1);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      // The memory access may fault; record debug info for the implicit
      // null check at the current pc.
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ cmpptr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
    if (is_reference_type(c->type())) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      // Materialize the oop constant before the (possibly faulting) memory
      // access below so the null-check pc is the compare itself.
      __ movoop(rscratch1, c->as_jobject());
    }
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (is_reference_type(c->type())) {
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
2114
// Three-way compare producing an int in dst:
//  - lir_cmp_fd2i / lir_ucmp_fd2i: float/double compare; the two codes differ
//    in how unordered (NaN) operands are ranked, handled inside cmpss2int /
//    cmpsd2int via the unordered_is_less flag.
//  - lir_cmp_l2i: long compare, yielding -1, 0 or 1.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      ShouldNotReachHere();
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    // Start with -1 (the "less" answer); otherwise materialize 0/1 from
    // the zero flag: setb produces 1 when the operands differ (greater).
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ setb(Assembler::notZero, dest);
    __ movzbl(dest, dest);
    __ bind(done);
  }
}
2139
2140
// Pad with nops so that the 4-byte displacement of the upcoming call
// instruction is word aligned (presumably so the call site can be patched
// atomically at runtime -- standard HotSpot call-site convention; the
// matching asserts are in call()/ic_call()).
void LIR_Assembler::align_call(LIR_Code code) {
  // make sure that the displacement word of the call ends up word aligned
  int offset = __ offset();
  switch (code) {
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::displacement_offset;
      break;
    case lir_icvirtual_call:
      // An inline-cache call is preceded by a movptr loading the cached
      // klass, so account for that instruction as well.
      offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
      break;
    default: ShouldNotReachHere();
  }
  __ align(BytesPerWord, offset);
}
2157
2158
// Emit a direct Java call to op->addr() with the given relocation type and
// record the debug/oop-map info at the return address. align_call() must
// have been emitted first -- the assert checks the displacement alignment
// it established.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
2166
2167
// Emit an inline-cache (virtual/interface) call and record debug info at
// the return address. The alignment assert verifies after the fact that
// the call's displacement word was word aligned, as set up by align_call().
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ post_call_nop();
}
2175
2176
// Emit the out-of-line stub associated with the static call at the current
// pc. The stub holds a metadata move into rbx (the callee Method*, patched
// at resolution time -- emitted here as nullptr) followed by a jump whose
// target is likewise patched later; the relocation links it back to call_pc.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // make sure that the displacement word of the call ends up word aligned
  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)nullptr);
  // must be set to -1 at code generation time
  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2199
2200
// Emit an explicit athrow: materialize the throwing pc into exceptionPC
// (rdx), record debug info, then call into the C1 exception-handling stub
// with the exception oop in rax.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  // The nofpu variant skips FPU state saving when the method has no FPU code.
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}
2229
2230
// Unwind (rethrow to the caller): jump to the method's shared unwind
// handler with the exception oop in rax; no pc or debug info is needed.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
2236
2237
// Variable-count shift: x86 shifts by cl implicitly, so LinearScan pins the
// count in rcx (SHIFT_count) and left/dest are required to be the same
// register so the shift happens in place.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    // 32-bit shift (shll/sarl/shrl by cl)
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    // 64-bit shift; the value lives entirely in the low register on x86_64.
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2272
2273
// Constant-count shift: copy left into dest, then shift dest in place by
// the masked immediate count.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    // NOTE(review): the JVM spec masks long shift distances with 0x3F, not
    // 0x1F; presumably constant counts > 31 never reach this code for longs
    // (earlier LIR/canonicalization stages would have masked them) -- confirm
    // before relying on this path for unmasked counts.
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2303
2304
2305 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2306 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2307 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2308 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2309 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2310 }
2311
2312
2313 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2314 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2315 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2316 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2317 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2318 }
2319
2320
2321 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2322 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2323 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2324 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2325 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
2326 }
2327
2328
2329 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
2330 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2331 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2332 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2333 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
2334 }
2335
2336
2337 // This code replaces a call to arraycopy; no exception may
2338 // be thrown in this code, they must be thrown in the System.arraycopy
2339 // activation frame; we could save some checks if this would not be the case
// Inline expansion of System.arraycopy. Emits inline guards (null checks,
// bounds checks, type checks) as permitted by op->flags(), then calls one of
// the shared arraycopy stubs; anything the fast path cannot prove branches
// to op->stub(), which performs the full runtime call.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = rscratch1;
  Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64

    // rax == 0 means the stub copied everything; we are done.
    __ testl(rax, rax);
    __ jcc(Assembler::equal, *stub->continuation());

    // Per the arraycopy stub contract, a non-zero return is the one's
    // complement of the number of elements already copied; recover that
    // count and advance the arguments before falling back to the slow stub.
    __ mov(tmp, rax);
    __ xorl(tmp, -1);

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr   (src,     Address(rsp, 4*BytesPerWord));

    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst, tmp_load_klass);
      // layout_helper >= _lh_neutral_value means "not an array"
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    // slow path unless src_pos + length <= src.length (unsigned compare)
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // Save src/dst; the subtype check below overwrites them with klasses.
      __ push_ppx(src);
      __ push_ppx(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      // Fast path was inconclusive: run the full subtype check stub.
      __ push_ppx(src);
      __ push_ppx(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(dst);
      __ pop_ppx(src);

      __ testl(src, src);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop_ppx(dst);
      __ pop_ppx(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

       // Spill because stubs can use any register they like and it's
       // easier to restore just those that we care about.
       store_parameter(dst, 0);
       store_parameter(dst_pos, 1);
       store_parameter(length, 2);
       store_parameter(src_pos, 3);
       store_parameter(src, 4);

        __ movl2ptr(length, length); //higher 32bits must be null

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        // Pass the destination element klass and its super_check_offset to
        // the checkcast stub so it can type-check each stored element.
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        // rax == 0: the whole copy succeeded.
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        // Partial copy: per the stub contract rax is the one's complement of
        // the number of elements copied. Recover the count and advance the
        // spilled arguments before taking the slow path for the remainder.
        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));


        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }

    if (basic_type != T_OBJECT) {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::notEqual, halt);
      __ cmp_klass(tmp, src, tmp2);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

  // All checks passed (or were statically proven): call the optimized
  // copy stub selected for this element type / alignment / disjointness.
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  if (stub != nullptr) {
    __ bind(*stub->continuation());
  }
}
2702
2703 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2704 assert(op->crc()->is_single_cpu(), "crc must be register");
2705 assert(op->val()->is_single_cpu(), "byte value must be register");
2706 assert(op->result_opr()->is_single_cpu(), "result must be register");
2707 Register crc = op->crc()->as_register();
2708 Register val = op->val()->as_register();
2709 Register res = op->result_opr()->as_register();
2710
2711 assert_different_registers(val, crc, res);
2712
2713 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
2714 __ notl(crc); // ~crc
2715 __ update_byte_crc32(crc, val, res);
2716 __ notl(crc); // ~crc
2717 __ mov(res, crc);
2718 }
2719
// Emit a monitorenter (lir_lock) or monitorexit (lir_unlock). The fast path
// is inlined via lock_object/unlock_object; contention or other failures
// branch to op->stub() (the runtime slow path), which falls back in at the
// stub's continuation label bound here.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (op->code() == lir_lock) {
    Register tmp = op->scratch_opr()->as_register();
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
2739
2740 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2741 Register obj = op->obj()->as_pointer_register();
2742 Register result = op->result_opr()->as_pointer_register();
2743
2744 CodeEmitInfo* info = op->info();
2745 if (info != nullptr) {
2746 add_debug_info_for_null_check_here(info);
2747 }
2748
2749 __ load_klass(result, obj, rscratch1);
2750 }
2751
// Update MethodData profiling counters for a call site. Always bumps the
// call counter; for virtual/interface calls with receiver-type profiling it
// records the receiver klass in the type-profile rows (statically when the
// exact receiver type is known at compile time).
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = rscratch1;

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // Found the matching row: bump its count directly, no runtime
          // receiver test needed.
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      // Receiver type unknown statically: load it at runtime.
      __ load_klass(recv, recv, tmp_load_klass);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
2800
// Emit type profiling for an object at a profile point (LIR_OpProfileType).
// Updates a single TypeEntries cell at mdo_addr, which can record: the
// null_seen bit, an observed Klass*, or the type_unknown bit once the
// profile has seen conflicting types. 'current_klass' is the compile-time
// snapshot of the cell's contents and is used to statically prune updates
// that can no longer be needed.
// NOTE(review): the #ifdef ASSERT regions below deliberately open/close
// braces across preprocessor branches — keep the structure intact.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = rscratch1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  // A null has not been proven impossible, so emit the null path.
  bool do_null = !not_null;
  // The profile already records exactly the type we would store: only the
  // null_seen bit could still need updating.
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

#ifdef ASSERT
  if (obj == tmp) {
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  } else {
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
  }
#endif
  if (do_null) {
    // Null path: record the null_seen bit if the snapshot shows it is not
    // set yet, then skip the klass update.
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    // If klass-update code follows, jump over it after handling null.
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    // Debug-only: a profile point proven not-null must never see null.
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      // Cross-check the statically known type against the dynamic klass.
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push_ppx(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop_ppx(tmp);
    }
#endif
    if (!no_conflict) {
      // Multiple types may reach this point: compare the observed klass
      // against the one currently recorded in the profile cell.
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
        __ mov(rscratch1, tmp); // save original value before XOR
        // XOR leaves zero in the klass bits iff the recorded klass matches.
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          // Cell may still be empty at runtime: take the first-time path.
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          // Debug-only: if the cell differs, it must be because another
          // thread raced us with the same klass; anything else is a bug.
          Label ok;
          __ push_ppx(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop_ppx(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}
2970
2971 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2972 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2973 }
2974
2975
// Align the current code position to a word boundary before emitting a
// backward-branch (loop) target.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
2979
2980
2981 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2982 if (left->is_single_cpu()) {
2983 __ negl(left->as_register());
2984 move_regs(left->as_register(), dest->as_register());
2985
2986 } else if (left->is_double_cpu()) {
2987 Register lo = left->as_register_lo();
2988 Register dst = dest->as_register_lo();
2989 __ movptr(dst, lo);
2990 __ negptr(dst);
2991
2992 } else if (dest->is_single_xmm()) {
2993 assert(!tmp->is_valid(), "do not need temporary");
2994 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
2995 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
2996 }
2997 __ xorps(dest->as_xmm_float_reg(),
2998 ExternalAddress((address)float_signflip_pool),
2999 rscratch1);
3000 } else if (dest->is_double_xmm()) {
3001 assert(!tmp->is_valid(), "do not need temporary");
3002 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
3003 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
3004 }
3005 __ xorpd(dest->as_xmm_double_reg(),
3006 ExternalAddress((address)double_signflip_pool),
3007 rscratch1);
3008 } else {
3009 ShouldNotReachHere();
3010 }
3011 }
3012
3013
3014 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3015 assert(src->is_address(), "must be an address");
3016 assert(dest->is_register(), "must be a register");
3017
3018 PatchingStub* patch = nullptr;
3019 if (patch_code != lir_patch_none) {
3020 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3021 }
3022
3023 Register reg = dest->as_pointer_register();
3024 LIR_Address* addr = src->as_address_ptr();
3025 __ lea(reg, as_Address(addr));
3026
3027 if (patch != nullptr) {
3028 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3029 }
3030 }
3031
3032
3033
3034 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3035 assert(!tmp->is_valid(), "don't need temporary");
3036 __ call(RuntimeAddress(dest));
3037 if (info != nullptr) {
3038 add_call_info_here(info);
3039 }
3040 __ post_call_nop();
3041 }
3042
3043
3044 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3045 assert(type == T_LONG, "only for volatile long fields");
3046
3047 if (info != nullptr) {
3048 add_debug_info_for_null_check_here(info);
3049 }
3050
3051 if (src->is_double_xmm()) {
3052 if (dest->is_double_cpu()) {
3053 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3054 } else if (dest->is_double_stack()) {
3055 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3056 } else if (dest->is_address()) {
3057 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3058 } else {
3059 ShouldNotReachHere();
3060 }
3061
3062 } else if (dest->is_double_xmm()) {
3063 if (src->is_double_stack()) {
3064 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3065 } else if (src->is_address()) {
3066 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3067 } else {
3068 ShouldNotReachHere();
3069 }
3070
3071 } else {
3072 ShouldNotReachHere();
3073 }
3074 }
3075
3076 #ifdef ASSERT
3077 // emit run-time assertion
3078 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3079 assert(op->code() == lir_assert, "must be");
3080
3081 if (op->in_opr1()->is_valid()) {
3082 assert(op->in_opr2()->is_valid(), "both operands must be valid");
3083 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3084 } else {
3085 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3086 assert(op->condition() == lir_cond_always, "no other conditions allowed");
3087 }
3088
3089 Label ok;
3090 if (op->condition() != lir_cond_always) {
3091 Assembler::Condition acond = Assembler::zero;
3092 switch (op->condition()) {
3093 case lir_cond_equal: acond = Assembler::equal; break;
3094 case lir_cond_notEqual: acond = Assembler::notEqual; break;
3095 case lir_cond_less: acond = Assembler::less; break;
3096 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
3097 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3098 case lir_cond_greater: acond = Assembler::greater; break;
3099 case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
3100 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
3101 default: ShouldNotReachHere();
3102 }
3103 __ jcc(acond, ok);
3104 }
3105 if (op->halt()) {
3106 const char* str = __ code_string(op->msg());
3107 __ stop(str);
3108 } else {
3109 breakpoint();
3110 }
3111 __ bind(ok);
3112 }
3113 #endif
3114
// Full memory barrier. On x86 only StoreLoad reordering is possible, so a
// StoreLoad fence suffices for a full barrier.
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3119
// Acquire barrier: a no-op on x86, whose memory model does not reorder
// loads with subsequent loads or stores.
void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}
3123
// Release barrier: a no-op on x86, whose memory model does not reorder
// stores with preceding loads or stores.
void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}
3127
// Load-load barrier: a no-op on x86 (loads are not reordered with older
// loads).
void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
3132
// Store-store barrier: a no-op on x86 (stores are not reordered with
// older stores).
void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
3137
// Load-store barrier: a no-op on x86 (stores are not reordered with older
// loads).
void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
3142
// Store-load barrier: the one ordering x86 can violate, so a real fence
// is emitted here.
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3146
// Emit a PAUSE instruction as a spin-wait hint to the processor.
void LIR_Assembler::on_spin_wait() {
  __ pause ();
}
3150
// Copy the current thread pointer into the result register. On x86-64 the
// thread is permanently kept in r15 (r15_thread).
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(result_reg->as_register(), r15_thread);
}
3155
3156
// Platform hook for peephole optimization of the LIR list; intentionally
// empty on x86.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
3160
3161 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3162 assert(data == dest, "xchg/xadd uses only 2 operands");
3163
3164 if (data->type() == T_INT) {
3165 if (code == lir_xadd) {
3166 __ lock();
3167 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3168 } else {
3169 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3170 }
3171 } else if (data->is_oop()) {
3172 assert (code == lir_xchg, "xadd for oops");
3173 Register obj = data->as_register();
3174 if (UseCompressedOops) {
3175 __ encode_heap_oop(obj);
3176 __ xchgl(obj, as_Address(src->as_address_ptr()));
3177 __ decode_heap_oop(obj);
3178 } else {
3179 __ xchgptr(obj, as_Address(src->as_address_ptr()));
3180 }
3181 } else if (data->type() == T_LONG) {
3182 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3183 if (code == lir_xadd) {
3184 __ lock();
3185 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3186 } else {
3187 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3188 }
3189 } else {
3190 ShouldNotReachHere();
3191 }
3192 }
3193
3194 #undef __