1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "code/aotCodeCache.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_x86.inline.hpp"
47
48
49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
50 // instructions, to allow sign-masking or sign-bit flipping. They allow
51 // fast versions of NegF/NegD and AbsF/AbsD.
52
53 // Note: 'double' and 'long long' have 32-bits alignment on x86.
54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
55 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
56 // of 128-bits operands for SSE instructions.
57 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
58 // Store the value to a 128-bits operand.
59 operand[0] = lo;
60 operand[1] = hi;
61 return operand;
62 }
63
// Buffer for 128-bits masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
// Each pointer below is 16-byte aligned inside fp_signmask_pool (see
// double_quadword above). The signmask patterns clear the sign bit(s)
// (for Abs), the signflip patterns toggle them (for Neg), for packed
// float and double lanes respectively.
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));


NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be

#define __ _masm->
79
80
81 static void select_different_registers(Register preserve,
82 Register extra,
83 Register &tmp1,
84 Register &tmp2) {
85 if (tmp1 == preserve) {
86 assert_different_registers(tmp1, tmp2, extra);
87 tmp1 = extra;
88 } else if (tmp2 == preserve) {
89 assert_different_registers(tmp1, tmp2, extra);
90 tmp2 = extra;
91 }
92 assert_different_registers(preserve, tmp1, tmp2);
93 }
94
95
96
97 static void select_different_registers(Register preserve,
98 Register extra,
99 Register &tmp1,
100 Register &tmp2,
101 Register &tmp3) {
102 if (tmp1 == preserve) {
103 assert_different_registers(tmp1, tmp2, tmp3, extra);
104 tmp1 = extra;
105 } else if (tmp2 == preserve) {
106 assert_different_registers(tmp1, tmp2, tmp3, extra);
107 tmp2 = extra;
108 } else if (tmp3 == preserve) {
109 assert_different_registers(tmp1, tmp2, tmp3, extra);
110 tmp3 = extra;
111 }
112 assert_different_registers(preserve, tmp1, tmp2, tmp3);
113 }
114
115
116
117 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
118 if (opr->is_constant()) {
119 LIR_Const* constant = opr->as_constant_ptr();
120 switch (constant->type()) {
121 case T_INT: {
122 return true;
123 }
124
125 default:
126 return false;
127 }
128 }
129 return false;
130 }
131
132
// Operand that holds the incoming receiver (first Java argument).
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
136
// The OSR buffer is passed in the same register as the receiver;
// view it as a pointer-sized operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
140
141 //--------------fpu register translations-----------------------
142
143
144 address LIR_Assembler::float_constant(float f) {
145 address const_addr = __ float_constant(f);
146 if (const_addr == nullptr) {
147 bailout("const section overflow");
148 return __ code()->consts()->start();
149 } else {
150 return const_addr;
151 }
152 }
153
154
155 address LIR_Assembler::double_constant(double d) {
156 address const_addr = __ double_constant(d);
157 if (const_addr == nullptr) {
158 bailout("const section overflow");
159 return __ code()->consts()->start();
160 } else {
161 return const_addr;
162 }
163 }
164
// Emit a debugger breakpoint (x86 int3 instruction).
void LIR_Assembler::breakpoint() {
  __ int3();
}
168
// Push the given operand onto the native stack.  Supports registers
// (single and double cpu), stack slots, and object/int constants.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // On 64-bit, a double-cpu value lives entirely in the lo register.
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
190
// Pop the top of the native stack into the given operand.  Only
// single-cpu register destinations are supported.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}
198
199 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
200 return addr->base()->is_illegal() && addr->index()->is_illegal();
201 }
202
203 //-------------------------------------------
204
// Convert a LIR_Address to an assembler Address, using rscratch1 as
// the temp for unreachable literal addresses (see overload below).
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
208
// Convert a LIR_Address into an assembler Address.  A literal address
// (no base, no index) that is not RIP-reachable is materialized into
// 'tmp' first; otherwise the base/index/scale/disp are translated
// directly.  Note: this may emit code (the movptr into tmp).
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      // Absolute address out of RIP-relative range: load it into tmp
      // and address through the register instead.
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    // Fold a constant index into the displacement; it must still fit
    // in a signed 32-bit displacement field.
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
239
240
// Address of the high word of a two-word value: the low-word address
// advanced by one machine word.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}
245
246
// Address of the low word of a two-word value; identical to the plain
// translation.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
250
251
// Emit the OSR (on-stack replacement) entry point: build the compiled
// frame and copy the interpreter's monitors out of the OSR buffer into
// the compiled activation's monitor slots.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the first (highest-numbered) monitor entry in the buffer.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      // Copy the lock word and then the object oop into the compiled
      // frame's monitor slot, staging through rbx.
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
315
316
// inline cache check; done before the frame is built.
// Returns the offset of the verified entry point.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
321
// Emit a class-initialization barrier for the method's holder: if the
// holder is not yet fully initialized (for the current thread), jump
// to the wrong-method stub so the call is re-resolved.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

  // Slow path: re-dispatch through the runtime.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
336
// Load an (initially null) oop constant into 'reg' under a PatchingStub;
// the real oop is patched in later when the target class is resolved.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
343
// Load an (initially null) Metadata* into 'reg' under a PatchingStub;
// the real klass pointer is patched in after resolution.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
350
351 // This specifies the rsp decrement needed to build the frame
352 int LIR_Assembler::initial_frame_size_in_bytes() const {
353 // if rounding, must let FrameMap know!
354
355 // The frame_map records size in slots (32bit word)
356
357 // subtract two words to account for return address and link
358 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
359 }
360
361
// Emit the method's exception handler stub.  Returns the code offset
// of the handler, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  // The runtime call does not return here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
388
389
// Emit the code to remove the frame from the stack in the exception
// unwind path.  Unlocks the receiver for synchronized methods, fires
// the dtrace method-exit probe if enabled, then tail-jumps to the
// shared unwind-exception stub.  Returns the handler's code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    // rax may be clobbered by the unlock/dtrace calls below, so stash
    // the exception oop in rbx for the duration.
    __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
442
443
// Emit the method's deopt handler stub, which transfers control to the
// deopt blob's unpack entry.  Returns the entry offset of the handler,
// or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The returned entry point is *after* the call; entering here jumps
  // back to 'start' and performs the call.
  int entry_offset = __ offset();

  __ jmp(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  // The jmp above also guarantees enough bytes follow entry_offset for
  // the post-call NOP check to read safely.
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}
471
// Emit a method return: tear down the frame, perform the return-time
// safepoint poll (recording its offset in the poll stub), and ret.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}
493
494
// Emit an in-method safepoint poll (a load-test against the thread's
// polling page) with debug info attached.  Returns the code offset of
// the poll sequence.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  // The poll instruction must encode to exactly 3 bytes — presumably so
  // the VM can identify/step over it at a safepoint; verify the length.
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
  return offset;
}
508
509
510 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
511 if (from_reg != to_reg) __ mov(to_reg, from_reg);
512 }
513
// Exchange the contents of two general-purpose registers.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
517
518
// Load a constant into a register destination.  Patching is only
// supported for object and metadata constants (via the *_with_patching
// helpers); all other types assert patch_code == lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#if INCLUDE_CDS
      // When dumping the AOT code cache, addresses of AOT runtime
      // constants must be loaded indirectly so they can be relocated.
      if (AOTCodeCache::is_on_for_dump()) {
        address b = c->as_pointer();
        if (AOTRuntimeConstants::contains(b)) {
          __ load_aotrc_address(dest->as_register_lo(), b);
          break;
        }
      }
#endif
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        // xor-zeroing is shorter than loading 0.0f from the constant
        // section (guarded to UseAVX <= 2 — see original change; the
        // reason for the AVX cap is not visible here).
        if (UseAVX <= 2 && c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (UseAVX <= 2 && c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
602
// Store a constant into a stack slot.  Float/double constants are
// stored as their raw bit patterns (as_jint_bits / as_jlong_bits).
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
      break;

    default:
      ShouldNotReachHere();
  }
}
634
// Store a constant to memory.  null_check_here tracks the offset of
// the instruction that may fault on a null base so the debug info can
// be attached to the right pc; its placement before each store is
// therefore order-sensitive.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          // Narrow null is all-zero bits; a 32-bit store suffices.
          __ movl(as_Address(addr), NULL_WORD);
        } else {
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
          // Materialize the oop in a scratch register first; direct
          // oop-immediate stores are not available.
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        // 64-bit immediates can't be stored directly; stage through r10.
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
710
711
// Register-to-register move, covering cpu<->cpu and xmm<->xmm pairs.
// Oop sources are verified before the move.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // On 64-bit, the lo and hi halves of a double-cpu operand are the
    // same physical register; one move suffices.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
757
// Store a register operand into a stack slot of the current frame.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    // NOTE(review): dstHI is unused on 64-bit — the whole value is
    // stored via the lo-word address below; likely a 32-bit leftover.
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
790
791
// Store a register operand to memory.  For patched field accesses a
// PatchingStub is created; null_check_here records the pc of the
// instruction that may fault on a null base, so the ordering of
// code_offset() captures relative to the emitted stores matters.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
    if (UseCompressedOops && !wide) {
      // Compress the oop into a scratch register; the narrow value is
      // what gets stored (and recorded in the oop map when patching).
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      break;
    }

    case T_DOUBLE: {
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      __ movptr(as_Address_lo(to_addr), from_lo);
      break;
    }

    case T_BYTE: // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR: // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
875
876
// Load a stack slot of the current frame into a register operand.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    // NOTE(review): src_addr_HI is unused on 64-bit — the whole value
    // is loaded via the lo-word address; likely a 32-bit leftover.
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}
908
909
// Copy one stack slot to another.  Pointer-sized copies go through a
// push/pop pair; 32-bit copies stage through rscratch1 since there is
// no 32-bit push on x86-64.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
    }

  } else if (src->is_double_stack()) {
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));

  } else {
    ShouldNotReachHere();
  }
}
929
930
931 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
932 assert(src->is_address(), "should not call otherwise");
933 assert(dest->is_register(), "should not call otherwise");
934
935 LIR_Address* addr = src->as_address_ptr();
936 Address from_addr = as_Address(addr);
937
938 if (addr->base()->type() == T_OBJECT) {
939 __ verify_oop(addr->base()->as_pointer_register());
940 }
941
942 switch (type) {
943 case T_BOOLEAN: // fall through
944 case T_BYTE: // fall through
945 case T_CHAR: // fall through
946 case T_SHORT:
947 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
948 // on pre P6 processors we may get partial register stalls
949 // so blow away the value of to_rinfo before loading a
950 // partial word into it. Do it here so that it precedes
951 // the potential patch point below.
952 __ xorptr(dest->as_register(), dest->as_register());
953 }
954 break;
955 default:
956 break;
957 }
958
959 PatchingStub* patch = nullptr;
960 if (patch_code != lir_patch_none) {
961 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
962 assert(from_addr.disp() != 0, "must have");
963 }
964 if (info != nullptr) {
965 add_debug_info_for_null_check_here(info);
966 }
967
968 switch (type) {
969 case T_FLOAT: {
970 if (dest->is_single_xmm()) {
971 __ movflt(dest->as_xmm_float_reg(), from_addr);
972 } else {
973 ShouldNotReachHere();
974 }
975 break;
976 }
977
978 case T_DOUBLE: {
979 if (dest->is_double_xmm()) {
980 __ movdbl(dest->as_xmm_double_reg(), from_addr);
981 } else {
982 ShouldNotReachHere();
983 }
984 break;
985 }
986
987 case T_OBJECT: // fall through
988 case T_ARRAY: // fall through
989 if (UseCompressedOops && !wide) {
990 __ movl(dest->as_register(), from_addr);
991 } else {
992 __ movptr(dest->as_register(), from_addr);
993 }
994 break;
995
996 case T_ADDRESS:
997 __ movptr(dest->as_register(), from_addr);
998 break;
999 case T_INT:
1000 __ movl(dest->as_register(), from_addr);
1001 break;
1002
1003 case T_LONG: {
1004 Register to_lo = dest->as_register_lo();
1005 Register to_hi = dest->as_register_hi();
1006 __ movptr(to_lo, as_Address_lo(addr));
1007 break;
1008 }
1009
1010 case T_BOOLEAN: // fall through
1011 case T_BYTE: {
1012 Register dest_reg = dest->as_register();
1013 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1014 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1015 __ movsbl(dest_reg, from_addr);
1016 } else {
1017 __ movb(dest_reg, from_addr);
1018 __ shll(dest_reg, 24);
1019 __ sarl(dest_reg, 24);
1020 }
1021 break;
1022 }
1023
1024 case T_CHAR: {
1025 Register dest_reg = dest->as_register();
1026 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1027 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1028 __ movzwl(dest_reg, from_addr);
1029 } else {
1030 __ movw(dest_reg, from_addr);
1031 }
1032 break;
1033 }
1034
1035 case T_SHORT: {
1036 Register dest_reg = dest->as_register();
1037 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1038 __ movswl(dest_reg, from_addr);
1039 } else {
1040 __ movw(dest_reg, from_addr);
1041 __ shll(dest_reg, 16);
1042 __ sarl(dest_reg, 16);
1043 }
1044 break;
1045 }
1046
1047 default:
1048 ShouldNotReachHere();
1049 }
1050
1051 if (patch != nullptr) {
1052 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1053 }
1054
1055 if (is_reference_type(type)) {
1056 if (UseCompressedOops && !wide) {
1057 __ decode_heap_oop(dest->as_register());
1058 }
1059
1060 __ verify_oop(dest->as_register());
1061 }
1062 }
1063
1064
1065 NEEDS_CLEANUP; // This could be static?
1066 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1067 int elem_size = type2aelembytes(type);
1068 switch (elem_size) {
1069 case 1: return Address::times_1;
1070 case 2: return Address::times_2;
1071 case 4: return Address::times_4;
1072 case 8: return Address::times_8;
1073 }
1074 ShouldNotReachHere();
1075 return Address::no_scale;
1076 }
1077
1078
1079 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1080 switch (op->code()) {
1081 case lir_idiv:
1082 case lir_irem:
1083 arithmetic_idiv(op->code(),
1084 op->in_opr1(),
1085 op->in_opr2(),
1086 op->in_opr3(),
1087 op->result_opr(),
1088 op->info());
1089 break;
1090 case lir_fmad:
1091 __ fmad(op->result_opr()->as_xmm_double_reg(),
1092 op->in_opr1()->as_xmm_double_reg(),
1093 op->in_opr2()->as_xmm_double_reg(),
1094 op->in_opr3()->as_xmm_double_reg());
1095 break;
1096 case lir_fmaf:
1097 __ fmaf(op->result_opr()->as_xmm_float_reg(),
1098 op->in_opr1()->as_xmm_float_reg(),
1099 op->in_opr2()->as_xmm_float_reg(),
1100 op->in_opr3()->as_xmm_float_reg());
1101 break;
1102 default: ShouldNotReachHere(); break;
1103 }
1104 }
1105
1106 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1107 #ifdef ASSERT
1108 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
1109 if (op->block() != nullptr) _branch_target_blocks.append(op->block());
1110 if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
1111 #endif
1112
1113 if (op->cond() == lir_cond_always) {
1114 if (op->info() != nullptr) add_debug_info_for_branch(op->info());
1115 __ jmp (*(op->label()));
1116 } else {
1117 Assembler::Condition acond = Assembler::zero;
1118 if (op->code() == lir_cond_float_branch) {
1119 assert(op->ublock() != nullptr, "must have unordered successor");
1120 __ jcc(Assembler::parity, *(op->ublock()->label()));
1121 switch(op->cond()) {
1122 case lir_cond_equal: acond = Assembler::equal; break;
1123 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1124 case lir_cond_less: acond = Assembler::below; break;
1125 case lir_cond_lessEqual: acond = Assembler::belowEqual; break;
1126 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1127 case lir_cond_greater: acond = Assembler::above; break;
1128 default: ShouldNotReachHere();
1129 }
1130 } else {
1131 switch (op->cond()) {
1132 case lir_cond_equal: acond = Assembler::equal; break;
1133 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1134 case lir_cond_less: acond = Assembler::less; break;
1135 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1136 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1137 case lir_cond_greater: acond = Assembler::greater; break;
1138 case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
1139 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
1140 default: ShouldNotReachHere();
1141 }
1142 }
1143 __ jcc(acond,*(op->label()));
1144 }
1145 }
1146
// Emit code for a primitive conversion bytecode: selects the single x86
// instruction (or short sequence) that converts `src` into `dest` for the
// given Bytecodes::_x2y conversion. Integral narrowing is done by masking or
// sign-extension; FP<->integer conversions go through the SSE cvt/convert
// helpers.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      // sign-extend 32-bit int to 64-bit long
      __ movl2ptr(dest->as_register_lo(), src->as_register());
      break;

    case Bytecodes::_l2i:
      // truncate long to int (movl zeroes the upper half)
      __ movl(dest->as_register(), src->as_register_lo());
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      // char is unsigned 16-bit: zero-extend by masking
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;

    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    // FP->integral conversions use macro-assembler helpers which handle the
    // Java-mandated special cases (NaN -> 0, saturation on overflow).
    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;

    default: ShouldNotReachHere();
  }
}
1218
// Emit fast-path allocation of an instance. If the klass may not yet be
// initialized, first check its init state and branch to the slow-path stub
// when it is not fully initialized; then inline the TLAB/eden allocation,
// falling back to the stub on failure.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    // init_state needs acquire, but x86 is TSO, and so we are already good.
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
                    InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  // The stub jumps back here when the slow path is done.
  __ bind(*op->stub()->continuation());
}
1237
// Emit fast-path allocation of an array. Takes the slow path (runtime stub)
// unconditionally when fast array allocation is disabled; otherwise inlines
// the allocation, shuffling temp registers so that `len` survives in tmp3.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  // Sign-extend the 32-bit length for 64-bit address arithmetic below.
  __ movslq(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // allocate_array clobbers its temps; make sure `len` ends up preserved
    // in tmp3 and is not also handed in as one of the scratch registers.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  // The stub jumps back here when the slow path is done.
  __ bind(*op->stub()->continuation());
}
1271
1272 void LIR_Assembler::type_profile_helper(Register mdo,
1273 ciMethodData *md, ciProfileData *data,
1274 Register recv) {
1275 int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1276 __ profile_receiver_type(recv, mdo, mdp_offset);
1277 }
1278
1279 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1280 // we always need a stub for the failure case.
1281 CodeStub* stub = op->stub();
1282 Register obj = op->object()->as_register();
1283 Register k_RInfo = op->tmp1()->as_register();
1284 Register klass_RInfo = op->tmp2()->as_register();
1285 Register dst = op->result_opr()->as_register();
1286 ciKlass* k = op->klass();
1287 Register Rtmp1 = noreg;
1288 Register tmp_load_klass = rscratch1;
1289
1290 // check if it needs to be profiled
1291 ciMethodData* md = nullptr;
1292 ciProfileData* data = nullptr;
1293
1294 if (op->should_profile()) {
1295 ciMethod* method = op->profiled_method();
1296 assert(method != nullptr, "Should have method");
1297 int bci = op->profiled_bci();
1298 md = method->method_data_or_null();
1299 assert(md != nullptr, "Sanity");
1300 data = md->bci_to_data(bci);
1301 assert(data != nullptr, "need data for type check");
1302 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1303 }
1304 Label* success_target = success;
1305 Label* failure_target = failure;
1306
1307 if (obj == k_RInfo) {
1308 k_RInfo = dst;
1309 } else if (obj == klass_RInfo) {
1310 klass_RInfo = dst;
1311 }
1312 if (k->is_loaded() && !UseCompressedClassPointers) {
1313 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1314 } else {
1315 Rtmp1 = op->tmp3()->as_register();
1316 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1317 }
1318
1319 assert_different_registers(obj, k_RInfo, klass_RInfo);
1320
1321 __ testptr(obj, obj);
1322 if (op->should_profile()) {
1323 Label not_null;
1324 Register mdo = klass_RInfo;
1325 __ mov_metadata(mdo, md->constant_encoding());
1326 __ jccb(Assembler::notEqual, not_null);
1327 // Object is null; update MDO and exit
1328 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1329 int header_bits = BitData::null_seen_byte_constant();
1330 __ orb(data_addr, header_bits);
1331 __ jmp(*obj_is_null);
1332 __ bind(not_null);
1333
1334 Register recv = k_RInfo;
1335 __ load_klass(recv, obj, tmp_load_klass);
1336 type_profile_helper(mdo, md, data, recv);
1337 } else {
1338 __ jcc(Assembler::equal, *obj_is_null);
1339 }
1340
1341 if (!k->is_loaded()) {
1342 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1343 } else {
1344 __ mov_metadata(k_RInfo, k->constant_encoding());
1345 }
1346 __ verify_oop(obj);
1347
1348 if (op->fast_check()) {
1349 // get object class
1350 // not a safepoint as obj null check happens earlier
1351 if (UseCompressedClassPointers) {
1352 __ load_klass(Rtmp1, obj, tmp_load_klass);
1353 __ cmpptr(k_RInfo, Rtmp1);
1354 } else {
1355 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1356 }
1357 __ jcc(Assembler::notEqual, *failure_target);
1358 // successful cast, fall through to profile or jump
1359 } else {
1360 // get object class
1361 // not a safepoint as obj null check happens earlier
1362 __ load_klass(klass_RInfo, obj, tmp_load_klass);
1363 if (k->is_loaded()) {
1364 // See if we get an immediate positive hit
1365 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1366 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1367 __ jcc(Assembler::notEqual, *failure_target);
1368 // successful cast, fall through to profile or jump
1369 } else {
1370 // See if we get an immediate positive hit
1371 __ jcc(Assembler::equal, *success_target);
1372 // check for self
1373 __ cmpptr(klass_RInfo, k_RInfo);
1374 __ jcc(Assembler::equal, *success_target);
1375
1376 __ push_ppx(klass_RInfo);
1377 __ push_ppx(k_RInfo);
1378 __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1379 __ pop_ppx(klass_RInfo);
1380 __ pop_ppx(klass_RInfo);
1381 // result is a boolean
1382 __ testl(klass_RInfo, klass_RInfo);
1383 __ jcc(Assembler::equal, *failure_target);
1384 // successful cast, fall through to profile or jump
1385 }
1386 } else {
1387 // perform the fast part of the checking logic
1388 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1389 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1390 __ push_ppx(klass_RInfo);
1391 __ push_ppx(k_RInfo);
1392 __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1393 __ pop_ppx(klass_RInfo);
1394 __ pop_ppx(k_RInfo);
1395 // result is a boolean
1396 __ testl(k_RInfo, k_RInfo);
1397 __ jcc(Assembler::equal, *failure_target);
1398 // successful cast, fall through to profile or jump
1399 }
1400 }
1401 __ jmp(*success);
1402 }
1403
1404
// Emit code for the three flavors of type check:
//   - lir_store_check: verify an array-store (value is assignable to the
//     array's element klass), with optional receiver-type profiling;
//   - lir_checkcast:   result is the object itself on success, stub on failure;
//   - lir_instanceof:  result is 1 on success, 0 on failure.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  Register tmp_load_klass = rscratch1;
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = nullptr;
    ciProfileData* data = nullptr;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr,       "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    // Storing null is always legal: record it in the profile (if any) and skip
    // the subtype check.
    __ testptr(value, value);
    if (op->should_profile()) {
      Label not_null;
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);

      Register recv = k_RInfo;
      __ load_klass(recv, value, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array, tmp_load_klass);
    __ load_klass(klass_RInfo, value, tmp_load_klass);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    // arguments are passed and the boolean result returned on the stack.
    __ push_ppx(klass_RInfo);
    __ push_ppx(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    __ pop_ppx(klass_RInfo);
    __ pop_ppx(k_RInfo);
    // result is a boolean
    __ testl(k_RInfo, k_RInfo);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      // A null object "succeeds" a checkcast, so null and success share a label.
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        // A null object is not an instance of anything: null goes to failure.
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}
1502
1503
// Emit an atomic compare-and-swap (int, long, or object). The compare value
// must be in rax because x86's CMPXCHG implicitly compares against and
// updates rax; the LOCK prefix makes the operation atomic.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        // Compress both values; cmpval (rax) is encoded in place, newval is
        // copied to rscratch1 so the caller's register is not clobbered.
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        __ lock();
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else {
        __ lock();
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      __ lock();
      __ cmpxchgl(newval, Address(addr, 0));
    }
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    __ lock();
    __ cmpxchgq(newval, Address(addr, 0));
  } else {
    Unimplemented();
  }
}
1547
// Emit a conditional move: result = condition ? opr1 : opr2. The strategy is
// to move opr1 into result unconditionally, then overwrite result with opr2
// under the NEGATED condition — either with a cmov (branch-free, when
// available and opr2 is not a constant) or with a conditional skip branch.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");

  // acond = condition for opr1; ncond = its negation, used for the cmov.
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  // Move opr1 into result first; the moves below must not alter the flags.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
    } else {
      ShouldNotReachHere();
    }

  } else {
    // Fallback: branch over the opr2 move when the condition holds.
    Label skip;
    __ jccb(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
1608
1609
// Emit a two-operand arithmetic op (add/sub/mul/div) dispatched on the kinds
// of the left and right operands (cpu register, stack slot, constant, XMM
// register, or memory address). x86 arithmetic is destructive, so `left`
// must also be `dest`.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      // incrementl/decrementl pick the best encoding (inc/dec/add/sub).
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ imulq(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      // 64-bit immediates cannot be encoded directly: materialize in r10 first.
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg);  break;
        case lir_sub: __ subss(lreg, rreg);  break;
        case lir_mul: __ mulss(lreg, rreg);  break;
        case lir_div: __ divss(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      // right is a stack slot or a float constant reached via the constant pool
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr);  break;
        case lir_sub: __ subss(lreg, raddr);  break;
        case lir_mul: __ mulss(lreg, raddr);  break;
        case lir_div: __ divss(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg);  break;
        case lir_sub: __ subsd(lreg, rreg);  break;
        case lir_mul: __ mulsd(lreg, rreg);  break;
        case lir_div: __ divsd(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      // right is a stack slot or a double constant reached via the constant pool
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr);  break;
        case lir_sub: __ subsd(lreg, raddr);  break;
        case lir_mul: __ mulsd(lreg, raddr);  break;
        case lir_div: __ divsd(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");

    // destination is memory: operate directly on the stack slot / address
    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
1802
1803
// Emit code for a unary intrinsic: double abs/sqrt on XMM registers, and the
// float <-> half-float (binary16) conversions.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          assert(!tmp->is_valid(), "do not need temporary");
          // abs = clear the sign bit by ANDing with a mask of all-ones
          // except the sign bit (128-bit aligned constant pool entry).
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress((address)double_signmask_pool),
                   rscratch1);
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (code == lir_f2hf) {
    __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
  } else if (code == lir_hf2f) {
    __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
  } else {
    Unimplemented();
  }
}
1832
1833 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1834 // assert(left->destroys_register(), "check");
1835 if (left->is_single_cpu()) {
1836 Register reg = left->as_register();
1837 if (right->is_constant()) {
1838 int val = right->as_constant_ptr()->as_jint();
1839 switch (code) {
1840 case lir_logic_and: __ andl (reg, val); break;
1841 case lir_logic_or: __ orl (reg, val); break;
1842 case lir_logic_xor: __ xorl (reg, val); break;
1843 default: ShouldNotReachHere();
1844 }
1845 } else if (right->is_stack()) {
1846 // added support for stack operands
1847 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1848 switch (code) {
1849 case lir_logic_and: __ andl (reg, raddr); break;
1850 case lir_logic_or: __ orl (reg, raddr); break;
1851 case lir_logic_xor: __ xorl (reg, raddr); break;
1852 default: ShouldNotReachHere();
1853 }
1854 } else {
1855 Register rright = right->as_register();
1856 switch (code) {
1857 case lir_logic_and: __ andptr (reg, rright); break;
1858 case lir_logic_or : __ orptr (reg, rright); break;
1859 case lir_logic_xor: __ xorptr (reg, rright); break;
1860 default: ShouldNotReachHere();
1861 }
1862 }
1863 move_regs(reg, dst->as_register());
1864 } else {
1865 Register l_lo = left->as_register_lo();
1866 Register l_hi = left->as_register_hi();
1867 if (right->is_constant()) {
1868 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
1869 switch (code) {
1870 case lir_logic_and:
1871 __ andq(l_lo, rscratch1);
1872 break;
1873 case lir_logic_or:
1874 __ orq(l_lo, rscratch1);
1875 break;
1876 case lir_logic_xor:
1877 __ xorq(l_lo, rscratch1);
1878 break;
1879 default: ShouldNotReachHere();
1880 }
1881 } else {
1882 Register r_lo;
1883 if (is_reference_type(right->type())) {
1884 r_lo = right->as_register();
1885 } else {
1886 r_lo = right->as_register_lo();
1887 }
1888 switch (code) {
1889 case lir_logic_and:
1890 __ andptr(l_lo, r_lo);
1891 break;
1892 case lir_logic_or:
1893 __ orptr(l_lo, r_lo);
1894 break;
1895 case lir_logic_xor:
1896 __ xorptr(l_lo, r_lo);
1897 break;
1898 default: ShouldNotReachHere();
1899 }
1900 }
1901
1902 Register dst_lo = dst->as_register_lo();
1903 Register dst_hi = dst->as_register_hi();
1904
1905 move_regs(l_lo, dst_lo);
1906 }
1907 }
1908
1909
// we assume that rax, and rdx can be overwritten
// Emit 32-bit integer division or remainder. For a positive power-of-two
// constant divisor the result is computed with shifts/masks (rounding toward
// zero, as Java requires); otherwise the hardware idivl is used, with rax as
// the dividend and rdx receiving the remainder.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(),   "left must be register");
  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  //  assert(left->destroys_register(), "check");
  //  assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      // Bias negative dividends so the arithmetic shift rounds toward zero:
      // after cdql, rdx is 0 for non-negative and -1 for negative dividends.
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2i_exact(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      Label done;
      __ mov(dreg, lreg);
      // Keep the sign bit plus the low bits; a non-negative masked value is
      // already the correct remainder.
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      // Negative dividend: sign-extend the masked low bits back to the
      // correct negative remainder (dec, set high bits, inc).
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    // corrected_idivl handles the min_jint / -1 overflow case and returns
    // the offset of the idivl instruction for the implicit div0 check.
    int idivl_offset = __ corrected_idivl(rreg);
    if (ImplicitDiv0Checks) {
      add_debug_info_for_div0(idivl_offset, info);
    }
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
1969
1970
// Emit a compare between opr1 and opr2, setting the condition flags for a
// subsequent conditional branch/cmove. Dispatches on the operand kinds:
// single cpu register, double (long) cpu register, single/double xmm, stack
// slot, constant, or address. `condition` is only consulted for asserts on
// cases that support equality-only comparison.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        jint i = c->as_jint();
        if (i == 0) {
          // test reg,reg is shorter than cmp reg,0 and sets the same flags
          // for the comparisons against zero.
          __ testl(reg1, reg1);
        } else {
          __ cmpl(reg1, i);
        }
      } else if (c->type() == T_METADATA) {
        // All we need for now is a comparison with null for equality.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          __ cmpoop(reg1, o, rscratch1);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      // The memory operand may fault; record debug info for an implicit
      // null check at this pc if the operation carries CodeEmitInfo.
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ cmpptr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
    if (is_reference_type(c->type())) {
      // Materialize the oop constant before the (possibly faulting) memory
      // access so the null-check pc recorded below is the compare itself.
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (is_reference_type(c->type())) {
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
2109
// Emit a three-way compare producing an int in dst: float/double compare
// (lir_cmp_fd2i / lir_ucmp_fd2i, differing in NaN ordering per Java's
// fcmpl/fcmpg semantics) or long compare (lir_cmp_l2i).
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      ShouldNotReachHere();
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    // Long compare to -1/0/1: preload -1, keep it if left < right, otherwise
    // setb(notZero) yields 1 for left != right and 0 for equality.
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ setb(Assembler::notZero, dest);
    __ movzbl(dest, dest);
    __ bind(done);
  }
}
2134
2135
// Pad the instruction stream so the upcoming call's 32-bit displacement
// word is naturally aligned; this lets the runtime patch the call target
// atomically. For inline-cache calls the preceding movabs of the cached
// metadata (NativeMovConstReg) is part of the call site and is accounted
// for in the offset.
void LIR_Assembler::align_call(LIR_Code code) {
  // make sure that the displacement word of the call ends up word aligned
  int offset = __ offset();
  switch (code) {
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::displacement_offset;
      break;
    case lir_icvirtual_call:
      offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
      break;
    default: ShouldNotReachHere();
  }
  __ align(BytesPerWord, offset);
}
2152
2153
// Emit a direct Java call (static/optvirtual/dynamic). The alignment
// established by align_call() is re-checked here so the displacement can be
// patched atomically. Debug info is recorded at the return pc; the trailing
// nop keeps the post-call site patchable/parseable.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
2161
2162
// Emit an inline-cache virtual call; MacroAssembler::ic_call emits the
// cached-metadata move plus the call. The displacement alignment is
// verified after the fact (backwards from the call's end) since ic_call
// emits a multi-instruction sequence.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ post_call_nop();
}
2170
2171
// Emit the out-of-line stub for a static call site: a metadata move into rbx
// (patched later with the callee Method*) followed by a jump (patched with
// the real entry). Both words must be patchable, hence the alignment dance.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    // Out of stub space; compilation is abandoned, caller will retry.
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // make sure that the displacement word of the call ends up word aligned
  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
  // Link this stub back to its call site for the patching code.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)nullptr);
  // must be set to -1 at code generation time
  // (+1 skips the one-byte jmp opcode so the 4-byte displacement is aligned)
  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2194
2195
// Emit an athrow: exception oop must be in rax, throwing pc is materialized
// into rdx, then control transfers to the shared C1 exception-handler
// lookup stub (nofpu variant skips FPU state saving when the method has no
// FPU code).
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}
2224
2225
// Emit the unwind path for an exception that has no handler in this method:
// jump to the method's shared unwind handler with the exception oop in rax.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
2231
2232
// Emit a variable-count shift. x86 requires the shift count in cl, which is
// why LinearScan pins `count` to rcx (SHIFT_count) and why left/dest must
// coincide (the shift is in-place on the value register).
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    // 32-bit shift: shl/sar/shr with implicit cl count.
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    // 64-bit shift: the long lives entirely in the low register on x86_64.
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2267
2268
// Emit a constant-count shift. The value is first copied into dest so
// `left` survives, then shifted in place by the immediate count.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec
    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    // NOTE(review): long shifts mask the count with 0x3F per the JLS; the
    // 0x1F mask here presumably relies on the LIR generator never emitting a
    // constant count >= 32 for this path -- confirm against the front end.
    count = count & 0x1F; // Java spec
    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2298
2299
// Spill a register into the frame's reserved outgoing-argument area at the
// given word offset from rsp (used to pass/preserve args for runtime stubs).
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}
2306
2307
// Store an int immediate into the reserved outgoing-argument area at the
// given word offset from rsp.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}
2314
2315
// Store an oop constant into the reserved outgoing-argument area at the
// given word offset from rsp (rscratch1 used to materialize the oop).
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
}
2322
2323
// Store a Metadata* constant into the reserved outgoing-argument area at the
// given word offset from rsp (rscratch1 used to materialize the pointer).
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
}
2330
2331
2332 // This code replaces a call to arraycopy; no exception may
2333 // be thrown in this code, they must be thrown in the System.arraycopy
2334 // activation frame; we could save some checks if this would not be the case
// Emit an inlined arraycopy. Depending on what the compiler proved about the
// operands (op->flags()/expected_type()), this emits: a fully generic call,
// a checkcast per-element copy, or a direct call to the best matching
// arraycopy stub, with all required null/bounds/type checks done inline and
// failures routed to the slow-path stub (which performs the real
// System.arraycopy call so exceptions are raised in the right frame).
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = rscratch1;
  Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64

    // The generic stub returns 0 on full success; otherwise rax encodes
    // ~(number of elements copied) and we fall through to the slow path.
    __ testl(rax, rax);
    __ jcc(Assembler::equal, *stub->continuation());

    __ mov(tmp, rax);
    __ xorl(tmp, -1);     // tmp = ~rax = elements already copied

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr (dst, Address(rsp, 0*BytesPerWord));
    __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr (length, Address(rsp, 2*BytesPerWord));
    __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr (src, Address(rsp, 4*BytesPerWord));

    // Advance past the partially-copied prefix before retrying via the stub.
    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      // Array klasses have a negative layout helper; >= _lh_neutral_value
      // means dst is not an array at all -> slow path.
      __ load_klass(tmp, dst, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    // unsigned compare (above) also catches pos+length overflow
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // Save src/dst oops; they are clobbered by the klass loads below and
      // must be restored on both the fast (cont) and slow paths.
      __ push_ppx(src);
      __ push_ppx(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      // Fast path inconclusive: full subtype check in the runtime stub
      // (consumes the two pushed klass arguments, answer returned on stack).
      __ push_ppx(src);
      __ push_ppx(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(dst);
      __ pop_ppx(src);

      __ testl(src, src);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop_ppx(dst);
      __ pop_ppx(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

        __ movl2ptr(length, length); //higher 32bits must be null

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        // rax == 0 means everything was copied; otherwise rax encodes
        // ~(elements copied) and the remainder goes to the slow path.
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        __ mov(tmp, rax);

        __ xorl(tmp, -1);   // tmp = ~rax = elements already copied

        // Restore previously spilled arguments
        __ movptr (dst, Address(rsp, 0*BytesPerWord));
        __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr (length, Address(rsp, 2*BytesPerWord));
        __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr (src, Address(rsp, 4*BytesPerWord));

        // Advance past the prefix that was copied before the type failure.
        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      // Restore the src/dst oops pushed before the klass loads.
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }

    if (basic_type != T_OBJECT) {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::notEqual, halt);
      __ cmp_klass(tmp, src, tmp2);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

  // All checks passed: call the best matching unchecked arraycopy stub with
  // (src_addr, dst_addr, element_count) in the C calling convention.
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  if (stub != nullptr) {
    __ bind(*stub->continuation());
  }
}
2697
// Emit the CRC32.update intrinsic: fold one byte `val` into the running
// `crc` using the shared 256-entry CRC table. The crc is complemented
// around the table step because Java's CRC32 stores the pre-inverted value.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  // res temporarily holds the table base for update_byte_crc32.
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}
2714
// Emit a monitorenter/monitorexit. The fast path is inlined via
// lock_object/unlock_object; contention or inflation branches to the
// operation's slow-path stub, and both paths rejoin at the continuation.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (op->code() == lir_lock) {
    Register tmp = op->scratch_opr()->as_register();
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
2734
// Load the klass pointer of obj into result. If CodeEmitInfo is attached,
// the klass load doubles as an implicit null check at this pc.
void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  __ load_klass(result, obj, rscratch1);
}
2746
// Emit call-site profiling code: bump the MDO call counter and, for
// virtual/interface calls, record the receiver type in the type-profile rows
// (statically when the receiver klass is known at compile time, otherwise
// dynamically via type_profile_helper).
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = rscratch1;

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // Found the matching row: bump its counter directly, no runtime
          // klass load needed.
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      __ load_klass(recv, recv, tmp_load_klass);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
2795
// Emits code that records the observed type of 'obj' into a single
// MethodData type-profile cell at 'mdo_addr'. The cell packs a Klass*
// together with flag bits (null_seen, type_unknown — see TypeEntries), so
// all updates below are careful bit-tests/or's on the same word.
// 'exact_klass'/'current_klass' are compile-time knowledge used to skip
// tests that are statically decided.
//
// NOTE(review): several closing braces below live inside #ifdef/#else arms;
// the brace structure is balanced per-configuration, not textually. Do not
// reformat mechanically.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = rscratch1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  // do_null: a null obj is still possible and must set the null_seen bit.
  bool do_null = !not_null;
  // exact_klass_set: profile already holds exactly this klass; nothing to add.
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

#ifdef ASSERT
  if (obj == tmp) {
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  } else {
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
  }
#endif
  if (do_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
      // obj was null: skip the klass-recording code below.
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    // Caller proved obj != null; in debug builds verify that claim.
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    // Debug check: if a klass was statically predicted, the object's actual
    // klass must match it.
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push_ppx(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop_ppx(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        // Obtain the klass to record: statically if known, else from obj.
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
        __ mov(rscratch1, tmp); // save original value before XOR
        // XOR against the profile word: zero klass bits means "same klass".
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          // Debug-only race check: another thread may have stored the same
          // klass concurrently; anything else is a profiling bug.
          Label ok;
          __ push_ppx(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop_ppx(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Compile-time conflict already known: just mark the cell unknown.
        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}
2965
2966 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2967 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2968 }
2969
2970
2971 void LIR_Assembler::align_backward_branch_target() {
2972 __ align(BytesPerWord);
2973 }
2974
2975
2976 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2977 if (left->is_single_cpu()) {
2978 __ negl(left->as_register());
2979 move_regs(left->as_register(), dest->as_register());
2980
2981 } else if (left->is_double_cpu()) {
2982 Register lo = left->as_register_lo();
2983 Register dst = dest->as_register_lo();
2984 __ movptr(dst, lo);
2985 __ negptr(dst);
2986
2987 } else if (dest->is_single_xmm()) {
2988 assert(!tmp->is_valid(), "do not need temporary");
2989 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
2990 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
2991 }
2992 __ xorps(dest->as_xmm_float_reg(),
2993 ExternalAddress((address)float_signflip_pool),
2994 rscratch1);
2995 } else if (dest->is_double_xmm()) {
2996 assert(!tmp->is_valid(), "do not need temporary");
2997 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
2998 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
2999 }
3000 __ xorpd(dest->as_xmm_double_reg(),
3001 ExternalAddress((address)double_signflip_pool),
3002 rscratch1);
3003 } else {
3004 ShouldNotReachHere();
3005 }
3006 }
3007
3008
3009 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3010 assert(src->is_address(), "must be an address");
3011 assert(dest->is_register(), "must be a register");
3012
3013 PatchingStub* patch = nullptr;
3014 if (patch_code != lir_patch_none) {
3015 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3016 }
3017
3018 Register reg = dest->as_pointer_register();
3019 LIR_Address* addr = src->as_address_ptr();
3020 __ lea(reg, as_Address(addr));
3021
3022 if (patch != nullptr) {
3023 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3024 }
3025 }
3026
3027
3028
3029 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3030 assert(!tmp->is_valid(), "don't need temporary");
3031 __ call(RuntimeAddress(dest));
3032 if (info != nullptr) {
3033 add_call_info_here(info);
3034 }
3035 __ post_call_nop();
3036 }
3037
3038
3039 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3040 assert(type == T_LONG, "only for volatile long fields");
3041
3042 if (info != nullptr) {
3043 add_debug_info_for_null_check_here(info);
3044 }
3045
3046 if (src->is_double_xmm()) {
3047 if (dest->is_double_cpu()) {
3048 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3049 } else if (dest->is_double_stack()) {
3050 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3051 } else if (dest->is_address()) {
3052 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3053 } else {
3054 ShouldNotReachHere();
3055 }
3056
3057 } else if (dest->is_double_xmm()) {
3058 if (src->is_double_stack()) {
3059 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3060 } else if (src->is_address()) {
3061 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3062 } else {
3063 ShouldNotReachHere();
3064 }
3065
3066 } else {
3067 ShouldNotReachHere();
3068 }
3069 }
3070
#ifdef ASSERT
// Emits a run-time assertion (debug builds only): evaluates the LIR
// condition and, when it fails, either halts the VM with the assert
// message (op->halt()) or drops into a breakpoint.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label assertion_holds;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition onto an x86 condition code and branch over
    // the failure path when the assertion holds.
    Assembler::Condition cond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        cond = Assembler::equal;        break;
      case lir_cond_notEqual:     cond = Assembler::notEqual;     break;
      case lir_cond_less:         cond = Assembler::less;         break;
      case lir_cond_lessEqual:    cond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: cond = Assembler::greaterEqual; break;
      case lir_cond_greater:      cond = Assembler::greater;      break;
      case lir_cond_belowEqual:   cond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   cond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(cond, assertion_holds);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(assertion_holds);
}
#endif
3109
3110 void LIR_Assembler::membar() {
3111 // QQQ sparc TSO uses this,
3112 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
3113 }
3114
3115 void LIR_Assembler::membar_acquire() {
3116 // No x86 machines currently require load fences
3117 }
3118
3119 void LIR_Assembler::membar_release() {
3120 // No x86 machines currently require store fences
3121 }
3122
3123 void LIR_Assembler::membar_loadload() {
3124 // no-op
3125 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3126 }
3127
3128 void LIR_Assembler::membar_storestore() {
3129 // no-op
3130 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3131 }
3132
3133 void LIR_Assembler::membar_loadstore() {
3134 // no-op
3135 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3136 }
3137
3138 void LIR_Assembler::membar_storeload() {
3139 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3140 }
3141
3142 void LIR_Assembler::on_spin_wait() {
3143 __ pause ();
3144 }
3145
3146 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3147 assert(result_reg->is_register(), "check");
3148 __ mov(result_reg->as_register(), r15_thread);
3149 }
3150
3151
3152 void LIR_Assembler::peephole(LIR_List*) {
3153 // do nothing for now
3154 }
3155
3156 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3157 assert(data == dest, "xchg/xadd uses only 2 operands");
3158
3159 if (data->type() == T_INT) {
3160 if (code == lir_xadd) {
3161 __ lock();
3162 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3163 } else {
3164 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3165 }
3166 } else if (data->is_oop()) {
3167 assert (code == lir_xchg, "xadd for oops");
3168 Register obj = data->as_register();
3169 if (UseCompressedOops) {
3170 __ encode_heap_oop(obj);
3171 __ xchgl(obj, as_Address(src->as_address_ptr()));
3172 __ decode_heap_oop(obj);
3173 } else {
3174 __ xchgptr(obj, as_Address(src->as_address_ptr()));
3175 }
3176 } else if (data->type() == T_LONG) {
3177 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3178 if (code == lir_xadd) {
3179 __ lock();
3180 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3181 } else {
3182 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3183 }
3184 } else {
3185 ShouldNotReachHere();
3186 }
3187 }
3188
3189 #undef __