1 /*
2 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "code/aotCodeCache.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_x86.inline.hpp"
47
48
49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
50 // instructions, to allow sign-masking or sign-bit flipping. They allow
51 // fast versions of NegF/NegD and AbsF/AbsD.
52
53 // Note: 'double' and 'long long' have 32-bits alignment on x86.
54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
55 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
56 // of 128-bits operands for SSE instructions.
57 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
58 // Store the value to a 128-bits operand.
59 operand[0] = lo;
60 operand[1] = hi;
61 return operand;
62 }
63
// Buffer for 128-bits masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
// Each pool pointer is a 16-byte-aligned 128-bit constant carved out of
// fp_signmask_pool by double_quadword():
//  - *_signmask: all bits set except the sign bit(s)  -> used for Abs (andps/andpd)
//  - *_signflip: only the sign bit(s) set             -> used for Neg (xorps/xorpd)
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
72
73
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be

#define __ _masm->
79
80 static void select_different_registers(Register preserve,
81 Register extra,
82 Register &tmp1,
83 Register &tmp2,
84 Register &tmp3) {
85 if (tmp1 == preserve) {
86 assert_different_registers(tmp1, tmp2, tmp3, extra);
87 tmp1 = extra;
88 } else if (tmp2 == preserve) {
89 assert_different_registers(tmp1, tmp2, tmp3, extra);
90 tmp2 = extra;
91 } else if (tmp3 == preserve) {
92 assert_different_registers(tmp1, tmp2, tmp3, extra);
93 tmp3 = extra;
94 }
95 assert_different_registers(preserve, tmp1, tmp2, tmp3);
96 }
97
98
99
100 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
101 if (opr->is_constant()) {
102 LIR_Const* constant = opr->as_constant_ptr();
103 switch (constant->type()) {
104 case T_INT: {
105 return true;
106 }
107
108 default:
109 return false;
110 }
111 }
112 return false;
113 }
114
115
// Operand holding the incoming receiver ('this') argument.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
119
// The OSR buffer is passed in the receiver register; view it as a pointer.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
123
124 //--------------fpu register translations-----------------------
125
126
127 address LIR_Assembler::float_constant(float f) {
128 address const_addr = __ float_constant(f);
129 if (const_addr == nullptr) {
130 bailout("const section overflow");
131 return __ code()->consts()->start();
132 } else {
133 return const_addr;
134 }
135 }
136
137
138 address LIR_Assembler::double_constant(double d) {
139 address const_addr = __ double_constant(d);
140 if (const_addr == nullptr) {
141 bailout("const section overflow");
142 return __ code()->consts()->start();
143 } else {
144 return const_addr;
145 }
146 }
147
// Emit a hardware breakpoint (x86 int3) for debugging.
void LIR_Assembler::breakpoint() {
  __ int3();
}
151
152 void LIR_Assembler::push(LIR_Opr opr) {
153 if (opr->is_single_cpu()) {
154 __ push_reg(opr->as_register());
155 } else if (opr->is_double_cpu()) {
156 __ push_reg(opr->as_register_lo());
157 } else if (opr->is_stack()) {
158 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
159 } else if (opr->is_constant()) {
160 LIR_Const* const_opr = opr->as_constant_ptr();
161 if (const_opr->type() == T_OBJECT) {
162 __ push_oop(const_opr->as_jobject(), rscratch1);
163 } else if (const_opr->type() == T_INT) {
164 __ push_jint(const_opr->as_jint());
165 } else {
166 ShouldNotReachHere();
167 }
168
169 } else {
170 ShouldNotReachHere();
171 }
172 }
173
174 void LIR_Assembler::pop(LIR_Opr opr) {
175 if (opr->is_single_cpu()) {
176 __ pop_reg(opr->as_register());
177 } else {
178 ShouldNotReachHere();
179 }
180 }
181
182 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
183 return addr->base()->is_illegal() && addr->index()->is_illegal();
184 }
185
186 //-------------------------------------------
187
// Convert a LIR_Address to an x86 Address, using rscratch1 as the
// temporary for unreachable absolute addresses.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
191
// Convert a LIR_Address to an x86 Address.  An absolute (base-less)
// address that is out of 32-bit reach is first materialized into 'tmp'.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      // Too far for a rip-relative/32-bit form: load the absolute
      // address into tmp and address indirectly through it.
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    // base + disp
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    // base + index*scale + disp
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    // Fold a constant index into the displacement; must still fit simm32.
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
222
223
// Address of the high word of a two-word operand: the base address plus
// one machine word.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}
228
229
// Address of the low word of a two-word operand (same as the base address).
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
233
234
// Emit the on-stack-replacement entry point: build the compiled frame
// and copy the interpreter's monitors from the OSR buffer into the
// compiled activation.  Locals are copied later by IR-emitted code.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the first (highest-numbered) monitor in the OSR buffer.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      // Copy lock word and object oop (via rbx) into the compiled
      // frame's monitor slots.
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
298
299
// inline cache check; done before the frame is built.
// Returns the offset of the verified entry point.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
304
// Emit a class-initialization barrier for 'method': fast path falls
// through when the holder class is initialized; otherwise jump to the
// wrong-method stub for re-resolution.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

  // Slow path: class not (yet) initialized for this thread.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
319
// Load an as-yet-unresolved oop constant into 'reg'.  A null oop is
// emitted as a placeholder; the PatchingStub arranges for the real
// value to be patched in at resolution time.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
326
// Load an as-yet-unresolved klass constant into 'reg'.  A null metadata
// pointer is emitted as a placeholder; the PatchingStub arranges for the
// real value to be patched in at resolution time.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
333
334 // This specifies the rsp decrement needed to build the frame
335 int LIR_Assembler::initial_frame_size_in_bytes() const {
336 // if rounding, must let FrameMap know!
337
338 // The frame_map records size in slots (32bit word)
339
340 // subtract two words to account for return address and link
341 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
342 }
343
344
// Emit the nmethod's exception handler stub.  Returns the code offset of
// the handler, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  // The runtime call does not return here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
371
372
// Emit the code to remove the frame from the stack in the exception
// unwind path: fetch and clear the pending exception from TLS, unlock
// the method's monitor if synchronized, fire the dtrace exit probe if
// enabled, then pop the frame and jump to the unwind stub.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // dtrace_method_exit(thread, method)
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
425
426
// Emit the nmethod's deopt handler stub.  Returns the code offset of the
// handler entry, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The handler's entry point is AFTER the call: entering via the jmp
  // below re-executes the call, whose return address then points back
  // into this stub.
  int entry_offset = __ offset();

  __ jmp(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  // The jmp also guarantees enough bytes after the call site for the
  // post-call NOP check to read safely.
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}
454
// Emit the method return sequence: pop the frame, optionally check the
// reserved stack area, poll for a safepoint, then ret.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

  // Record the poll offset so the stub can relocate debug info to it.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}
476
477
// Emit a mid-method safepoint poll: a load-test against the thread-local
// polling page.  Returns the code offset of the poll sequence.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  // Faults on an armed polling page; the signal handler relies on the
  // poll instruction being exactly 3 bytes long.
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
  return offset;
}
491
492
493 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
494 if (from_reg != to_reg) __ mov(to_reg, from_reg);
495 }
496
// Exchange the contents of two registers (x86 xchg).
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
500
501
// Load a constant into a register, dispatching on the constant's type.
// Only oop/metadata constants support patching; all other types assert
// lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#if INCLUDE_CDS
      // When dumping the AOT code cache, addresses of AOT runtime
      // constants must be loaded indirectly so they stay relocatable.
      if (AOTCodeCache::is_on_for_dump()) {
        address b = c->as_pointer();
        if (AOTRuntimeConstants::contains(b)) {
          __ load_aotrc_address(dest->as_register_lo(), b);
          break;
        }
      }
#endif
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        // Unresolved oop: emit placeholder plus patching stub.
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        // Unresolved klass: emit placeholder plus patching stub.
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        // xor the register with itself for 0.0f (shorter than a load).
        if (UseAVX <= 2 && c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        // xor the register with itself for 0.0 (shorter than a load).
        if (UseAVX <= 2 && c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
585
// Store a constant into a stack slot.  Float/double constants are
// written as their raw bit patterns (no FP registers involved).
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
      break;

    default:
      ShouldNotReachHere();
  }
}
617
// Store a constant to memory.  'null_check_here' tracks the offset of
// the instruction that may fault on a null base so that, when 'info' is
// supplied, an implicit null check can be registered for it.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          // Narrow null is encoded as 0, so a plain 32-bit store works.
          __ movl(as_Address(addr), NULL_WORD);
        } else {
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
          // Non-null oop constant: load into scratch, then store
          // (compressed when possible).
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        // 64-bit immediates cannot be stored directly; stage via r10.
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
693
694
// Register-to-register move, covering cpu<->cpu and xmm<->xmm cases
// (including the long<->object cross-kind moves the register allocator
// can produce).
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // On x86_64 a long lives in a single register: lo == hi.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
740
// Spill a register to a stack slot, choosing the store width from the
// operand kind and type.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    // One 64-bit store covers both words on x86_64.
    __ movptr (dstLO, src->as_register_lo());

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
773
774
// Store a register to memory.  Handles compressed-oop encoding, field
// patching, and implicit null-check bookkeeping via 'null_check_here'.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
    if (UseCompressedOops && !wide) {
      // Compress the oop in a scratch register; the original stays intact.
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    // Patched field accesses require a non-zero displacement placeholder.
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      break;
    }

    case T_DOUBLE: {
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      __ movptr(as_Address_lo(to_addr), from_lo);
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
858
859
// Reload a stack slot into a register, choosing the load width from the
// destination kind and type.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    // One 64-bit load covers both words on x86_64.
    __ movptr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}
891
892
// Copy one stack slot to another.  Pointer-sized slots go through a
// push/pop pair; 32-bit slots go through rscratch1.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
    }

  } else if (src->is_double_stack()) {
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));

  } else {
    ShouldNotReachHere();
  }
}
912
913
// Load from memory into a register.  Handles compressed-oop decoding,
// field patching, pre-P6 partial-register-stall avoidance, and implicit
// null checks (registered via 'info' before the load is emitted).
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    // Patched field accesses require a non-zero displacement placeholder.
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        // Narrow oop load; decoded after the patching epilog below.
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      __ movptr(to_lo, as_Address_lo(addr));
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        // Pre-P6: emulate a sign-extending byte load with shifts.
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        // Pre-P6: upper bits were already cleared by the xor above.
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        // Pre-P6: emulate a sign-extending short load with shifts.
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}
1046
1047
1048 NEEDS_CLEANUP; // This could be static?
1049 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1050 int elem_size = type2aelembytes(type);
1051 switch (elem_size) {
1052 case 1: return Address::times_1;
1053 case 2: return Address::times_2;
1054 case 4: return Address::times_4;
1055 case 8: return Address::times_8;
1056 }
1057 ShouldNotReachHere();
1058 return Address::no_scale;
1059 }
1060
1061
1062 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1063 switch (op->code()) {
1064 case lir_idiv:
1065 case lir_irem:
1066 arithmetic_idiv(op->code(),
1067 op->in_opr1(),
1068 op->in_opr2(),
1069 op->in_opr3(),
1070 op->result_opr(),
1071 op->info());
1072 break;
1073 case lir_fmad:
1074 __ fmad(op->result_opr()->as_xmm_double_reg(),
1075 op->in_opr1()->as_xmm_double_reg(),
1076 op->in_opr2()->as_xmm_double_reg(),
1077 op->in_opr3()->as_xmm_double_reg());
1078 break;
1079 case lir_fmaf:
1080 __ fmaf(op->result_opr()->as_xmm_float_reg(),
1081 op->in_opr1()->as_xmm_float_reg(),
1082 op->in_opr2()->as_xmm_float_reg(),
1083 op->in_opr3()->as_xmm_float_reg());
1084 break;
1085 default: ShouldNotReachHere(); break;
1086 }
1087 }
1088
// Emit a LIR branch. Unconditional branches may carry debug info; conditional
// branches translate the LIR condition into an x86 condition code. Float
// branches dispatch the unordered (NaN) case first via the parity flag and
// then use the unsigned condition codes (below/above) that match the flag
// results of the preceding floating-point compare.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  // Record branch targets so label binding can be verified later.
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr) _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      // An unordered comparison (either operand NaN) sets PF; branch to the
      // unordered successor first so the codes below only see ordered results.
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                         ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                         ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
1129
// Emit code for a primitive-conversion bytecode. The register allocator has
// already placed src/dest in the operand kinds each conversion needs (GPRs
// for integral values, XMM registers for float/double).
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      // sign-extend int to long
      __ movl2ptr(dest->as_register_lo(), src->as_register());
      break;

    case Bytecodes::_l2i:
      // truncate long to int (32-bit move)
      __ movl(dest->as_register(), src->as_register_lo());
      break;

    case Bytecodes::_i2b:
      // truncate to byte, then sign-extend back to int
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      // truncate to char: zero-extend by masking to the low 16 bits
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      // truncate to short, then sign-extend back to int
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;

    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      // 'l' variants consume a 32-bit integer source
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      // 'q' variants consume the full 64-bit source
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
      // helper covers what a bare cvttss2si cannot express directly
      // (presumably the NaN/out-of-range cases -- see MacroAssembler)
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;

    default: ShouldNotReachHere();
  }
}
1201
1202 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1203 if (op->init_check()) {
1204 add_debug_info_for_null_check_here(op->stub()->info());
1205 // init_state needs acquire, but x86 is TSO, and so we are already good.
1206 __ cmpb(Address(op->klass()->as_register(),
1207 InstanceKlass::init_state_offset()),
1208 InstanceKlass::fully_initialized);
1209 __ jcc(Assembler::notEqual, *op->stub()->entry());
1210 }
1211 __ allocate_object(op->obj()->as_register(),
1212 op->tmp1()->as_register(),
1213 op->tmp2()->as_register(),
1214 op->header_size(),
1215 op->object_size(),
1216 op->klass()->as_register(),
1217 *op->stub()->entry());
1218 __ bind(*op->stub()->continuation());
1219 }
1220
1221 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1222 Register len = op->len()->as_register();
1223 __ movslq(len, len);
1224
1225 if (UseSlowPath ||
1226 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1227 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1228 __ jmp(*op->stub()->entry());
1229 } else {
1230 Register tmp1 = op->tmp1()->as_register();
1231 Register tmp2 = op->tmp2()->as_register();
1232 Register tmp3 = op->tmp3()->as_register();
1233 if (len == tmp1) {
1234 tmp1 = tmp3;
1235 } else if (len == tmp2) {
1236 tmp2 = tmp3;
1237 } else if (len == tmp3) {
1238 // everything is ok
1239 } else {
1240 __ mov(tmp3, len);
1241 }
1242 __ allocate_array(op->obj()->as_register(),
1243 len,
1244 tmp1,
1245 tmp2,
1246 arrayOopDesc::base_offset_in_bytes(op->type()),
1247 array_element_size(op->type()),
1248 op->klass()->as_register(),
1249 *op->stub()->entry(),
1250 op->zero_array());
1251 }
1252 __ bind(*op->stub()->continuation());
1253 }
1254
1255 void LIR_Assembler::type_profile_helper(Register mdo,
1256 ciMethodData *md, ciProfileData *data,
1257 Register recv) {
1258 int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1259 __ profile_receiver_type(recv, mdo, mdp_offset);
1260 }
1261
// Common type-check emission shared by checkcast and instanceof.
// Jumps to 'obj_is_null' when obj is null, to 'failure' when obj is not a
// subtype of op->klass(), and to 'success' otherwise. When profiling is
// enabled, the MDO null-seen flag is set for the null case and the receiver
// type is recorded for the non-null case.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();      // klass checked against
  Register klass_RInfo = op->tmp2()->as_register();  // obj's klass / MDO while profiling
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = rscratch1;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  // Resolve register aliasing: obj must stay live across the klass loads
  // below, so redirect a temp that aliases it onto dst instead.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  Rtmp1 = op->tmp3()->as_register();
  select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  __ testptr(obj, obj);
  if (op->should_profile()) {
    Label not_null;
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
    int header_bits = BitData::null_seen_byte_constant();
    __ orb(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);

    Register recv = k_RInfo;
    __ load_klass(recv, obj, tmp_load_klass);
    type_profile_helper(mdo, md, data, recv);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  // Materialize the klass being checked against; emit a patching stub when
  // it is not yet loaded.
  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Fast check: a direct pointer comparison of the two klasses decides the
    // test (presumably op->fast_check() is only set when an exact match is
    // sufficient -- determined by the LIR generator).
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(Rtmp1, obj, tmp_load_klass);
    __ cmpptr(k_RInfo, Rtmp1);
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj, tmp_load_klass);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // Offset is a fixed per-klass display slot: mismatch is definitive.
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // The offset points at the secondary-super cache: a hit is
        // conclusive, a miss is not. Try the exact-match case, then fall
        // back to the out-of-line subtype check.
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
        __ cmpptr(klass_RInfo, k_RInfo);
        __ jcc(Assembler::equal, *success_target);

        __ push_ppx(klass_RInfo);
        __ push_ppx(k_RInfo);
        __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
        // The stub's result comes back on the stack in the slot of the first
        // value pushed -- hence both pops target klass_RInfo and the second
        // pop retrieves the result (the double pop is intentional).
        __ pop_ppx(klass_RInfo);
        __ pop_ppx(klass_RInfo);
        // result is a boolean
        __ testl(klass_RInfo, klass_RInfo);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // Klass unknown at compile time: generic fast path first, then the
      // out-of-line slow path.
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push_ppx(klass_RInfo);
      __ push_ppx(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      // As above: the boolean result replaces the first pushed value, so it
      // lands in k_RInfo via the second pop.
      __ pop_ppx(klass_RInfo);
      __ pop_ppx(k_RInfo);
      // result is a boolean
      __ testl(k_RInfo, k_RInfo);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ jmp(*success);
}
1378
1379
// Dispatch a LIR type-check operation: array store check (lir_store_check)
// is emitted inline here; checkcast and instanceof delegate to
// emit_typecheck_helper and then materialize their results.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  Register tmp_load_klass = rscratch1;
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();   // value being stored
    Register array = op->array()->as_register();    // destination array
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = nullptr;
    ciProfileData* data = nullptr;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    // Storing null always passes the store check.
    __ testptr(value, value);
    if (op->should_profile()) {
      Label not_null;
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);

      Register recv = k_RInfo;
      __ load_klass(recv, value, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array, tmp_load_klass);
    __ load_klass(klass_RInfo, value, tmp_load_klass);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ push_ppx(klass_RInfo);
    __ push_ppx(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    // The stub's boolean result comes back on the stack in the slot of the
    // first value pushed, i.e. it is what the second pop retrieves.
    __ pop_ppx(klass_RInfo);
    __ pop_ppx(k_RInfo);
    // result is a boolean
    __ testl(k_RInfo, k_RInfo);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      // checkcast: failure raises the exception via the stub; a null obj
      // passes straight through (both null and success bind here).
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        // instanceof: produce 0/1 in dst; a null obj counts as failure (0).
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}
1477
1478
1479 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1480 if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1481 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1482 Register newval = op->new_value()->as_register();
1483 Register cmpval = op->cmp_value()->as_register();
1484 assert(cmpval == rax, "wrong register");
1485 assert(newval != noreg, "new val must be register");
1486 assert(cmpval != newval, "cmp and new values must be in different registers");
1487 assert(cmpval != addr, "cmp and addr must be in different registers");
1488 assert(newval != addr, "new value and addr must be in different registers");
1489
1490 if (op->code() == lir_cas_obj) {
1491 if (UseCompressedOops) {
1492 __ encode_heap_oop(cmpval);
1493 __ mov(rscratch1, newval);
1494 __ encode_heap_oop(rscratch1);
1495 __ lock();
1496 // cmpval (rax) is implicitly used by this instruction
1497 __ cmpxchgl(rscratch1, Address(addr, 0));
1498 } else {
1499 __ lock();
1500 __ cmpxchgptr(newval, Address(addr, 0));
1501 }
1502 } else {
1503 assert(op->code() == lir_cas_int, "lir_cas_int expected");
1504 __ lock();
1505 __ cmpxchgl(newval, Address(addr, 0));
1506 }
1507 } else if (op->code() == lir_cas_long) {
1508 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1509 Register newval = op->new_value()->as_register_lo();
1510 Register cmpval = op->cmp_value()->as_register_lo();
1511 assert(cmpval == rax, "wrong register");
1512 assert(newval != noreg, "new val must be register");
1513 assert(cmpval != newval, "cmp and new values must be in different registers");
1514 assert(cmpval != addr, "cmp and addr must be in different registers");
1515 assert(newval != addr, "new value and addr must be in different registers");
1516 __ lock();
1517 __ cmpxchgq(newval, Address(addr, 0));
1518 } else {
1519 Unimplemented();
1520 }
1521 }
1522
// Emit a conditional move: result = condition ? opr1 : opr2. The compare
// that set the flags was emitted beforehand (cmp_opr1/cmp_opr2 are unused on
// x86, as asserted). result is loaded with opr1 unconditionally and then
// conditionally overwritten with opr2, either via cmov on the negated
// condition or via a short branch.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");

  // acond selects opr1; ncond is its negation, used for the cmov that
  // overwrites result with opr2.
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  // Move opr1 into result unconditionally.
  // NOTE(review): these moves must not clobber the condition flags consumed
  // by the cmov/jcc below -- presumably the helpers emit only flag-neutral
  // instructions in this context; confirm before changing them.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
    } else {
      ShouldNotReachHere();
    }

  } else {
    // No cmov usable here: jump over the opr2 move when the condition
    // selects opr1.
    Label skip;
    __ jccb(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
1583
1584
1585 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1586 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1587
1588 if (left->is_single_cpu()) {
1589 assert(left == dest, "left and dest must be equal");
1590 Register lreg = left->as_register();
1591
1592 if (right->is_single_cpu()) {
1593 // cpu register - cpu register
1594 Register rreg = right->as_register();
1595 switch (code) {
1596 case lir_add: __ addl (lreg, rreg); break;
1597 case lir_sub: __ subl (lreg, rreg); break;
1598 case lir_mul: __ imull(lreg, rreg); break;
1599 default: ShouldNotReachHere();
1600 }
1601
1602 } else if (right->is_stack()) {
1603 // cpu register - stack
1604 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1605 switch (code) {
1606 case lir_add: __ addl(lreg, raddr); break;
1607 case lir_sub: __ subl(lreg, raddr); break;
1608 default: ShouldNotReachHere();
1609 }
1610
1611 } else if (right->is_constant()) {
1612 // cpu register - constant
1613 jint c = right->as_constant_ptr()->as_jint();
1614 switch (code) {
1615 case lir_add: {
1616 __ incrementl(lreg, c);
1617 break;
1618 }
1619 case lir_sub: {
1620 __ decrementl(lreg, c);
1621 break;
1622 }
1623 default: ShouldNotReachHere();
1624 }
1625
1626 } else {
1627 ShouldNotReachHere();
1628 }
1629
1630 } else if (left->is_double_cpu()) {
1631 assert(left == dest, "left and dest must be equal");
1632 Register lreg_lo = left->as_register_lo();
1633 Register lreg_hi = left->as_register_hi();
1634
1635 if (right->is_double_cpu()) {
1636 // cpu register - cpu register
1637 Register rreg_lo = right->as_register_lo();
1638 Register rreg_hi = right->as_register_hi();
1639 assert_different_registers(lreg_lo, rreg_lo);
1640 switch (code) {
1641 case lir_add:
1642 __ addptr(lreg_lo, rreg_lo);
1643 break;
1644 case lir_sub:
1645 __ subptr(lreg_lo, rreg_lo);
1646 break;
1647 case lir_mul:
1648 __ imulq(lreg_lo, rreg_lo);
1649 break;
1650 default:
1651 ShouldNotReachHere();
1652 }
1653
1654 } else if (right->is_constant()) {
1655 // cpu register - constant
1656 jlong c = right->as_constant_ptr()->as_jlong_bits();
1657 __ movptr(r10, (intptr_t) c);
1658 switch (code) {
1659 case lir_add:
1660 __ addptr(lreg_lo, r10);
1661 break;
1662 case lir_sub:
1663 __ subptr(lreg_lo, r10);
1664 break;
1665 default:
1666 ShouldNotReachHere();
1667 }
1668
1669 } else {
1670 ShouldNotReachHere();
1671 }
1672
1673 } else if (left->is_single_xmm()) {
1674 assert(left == dest, "left and dest must be equal");
1675 XMMRegister lreg = left->as_xmm_float_reg();
1676
1677 if (right->is_single_xmm()) {
1678 XMMRegister rreg = right->as_xmm_float_reg();
1679 switch (code) {
1680 case lir_add: __ addss(lreg, rreg); break;
1681 case lir_sub: __ subss(lreg, rreg); break;
1682 case lir_mul: __ mulss(lreg, rreg); break;
1683 case lir_div: __ divss(lreg, rreg); break;
1684 default: ShouldNotReachHere();
1685 }
1686 } else {
1687 Address raddr;
1688 if (right->is_single_stack()) {
1689 raddr = frame_map()->address_for_slot(right->single_stack_ix());
1690 } else if (right->is_constant()) {
1691 // hack for now
1692 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
1693 } else {
1694 ShouldNotReachHere();
1695 }
1696 switch (code) {
1697 case lir_add: __ addss(lreg, raddr); break;
1698 case lir_sub: __ subss(lreg, raddr); break;
1699 case lir_mul: __ mulss(lreg, raddr); break;
1700 case lir_div: __ divss(lreg, raddr); break;
1701 default: ShouldNotReachHere();
1702 }
1703 }
1704
1705 } else if (left->is_double_xmm()) {
1706 assert(left == dest, "left and dest must be equal");
1707
1708 XMMRegister lreg = left->as_xmm_double_reg();
1709 if (right->is_double_xmm()) {
1710 XMMRegister rreg = right->as_xmm_double_reg();
1711 switch (code) {
1712 case lir_add: __ addsd(lreg, rreg); break;
1713 case lir_sub: __ subsd(lreg, rreg); break;
1714 case lir_mul: __ mulsd(lreg, rreg); break;
1715 case lir_div: __ divsd(lreg, rreg); break;
1716 default: ShouldNotReachHere();
1717 }
1718 } else {
1719 Address raddr;
1720 if (right->is_double_stack()) {
1721 raddr = frame_map()->address_for_slot(right->double_stack_ix());
1722 } else if (right->is_constant()) {
1723 // hack for now
1724 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
1725 } else {
1726 ShouldNotReachHere();
1727 }
1728 switch (code) {
1729 case lir_add: __ addsd(lreg, raddr); break;
1730 case lir_sub: __ subsd(lreg, raddr); break;
1731 case lir_mul: __ mulsd(lreg, raddr); break;
1732 case lir_div: __ divsd(lreg, raddr); break;
1733 default: ShouldNotReachHere();
1734 }
1735 }
1736
1737 } else if (left->is_single_stack() || left->is_address()) {
1738 assert(left == dest, "left and dest must be equal");
1739
1740 Address laddr;
1741 if (left->is_single_stack()) {
1742 laddr = frame_map()->address_for_slot(left->single_stack_ix());
1743 } else if (left->is_address()) {
1744 laddr = as_Address(left->as_address_ptr());
1745 } else {
1746 ShouldNotReachHere();
1747 }
1748
1749 if (right->is_single_cpu()) {
1750 Register rreg = right->as_register();
1751 switch (code) {
1752 case lir_add: __ addl(laddr, rreg); break;
1753 case lir_sub: __ subl(laddr, rreg); break;
1754 default: ShouldNotReachHere();
1755 }
1756 } else if (right->is_constant()) {
1757 jint c = right->as_constant_ptr()->as_jint();
1758 switch (code) {
1759 case lir_add: {
1760 __ incrementl(laddr, c);
1761 break;
1762 }
1763 case lir_sub: {
1764 __ decrementl(laddr, c);
1765 break;
1766 }
1767 default: ShouldNotReachHere();
1768 }
1769 } else {
1770 ShouldNotReachHere();
1771 }
1772
1773 } else {
1774 ShouldNotReachHere();
1775 }
1776 }
1777
1778
1779 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1780 if (value->is_double_xmm()) {
1781 switch(code) {
1782 case lir_abs :
1783 {
1784 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1785 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1786 }
1787 assert(!tmp->is_valid(), "do not need temporary");
1788 __ andpd(dest->as_xmm_double_reg(),
1789 ExternalAddress((address)double_signmask_pool),
1790 rscratch1);
1791 }
1792 break;
1793
1794 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1795 // all other intrinsics are not available in the SSE instruction set, so FPU is used
1796 default : ShouldNotReachHere();
1797 }
1798
1799 } else if (code == lir_f2hf) {
1800 __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1801 } else if (code == lir_hf2f) {
1802 __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1803 } else {
1804 Unimplemented();
1805 }
1806 }
1807
1808 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1809 // assert(left->destroys_register(), "check");
1810 if (left->is_single_cpu()) {
1811 Register reg = left->as_register();
1812 if (right->is_constant()) {
1813 int val = right->as_constant_ptr()->as_jint();
1814 switch (code) {
1815 case lir_logic_and: __ andl (reg, val); break;
1816 case lir_logic_or: __ orl (reg, val); break;
1817 case lir_logic_xor: __ xorl (reg, val); break;
1818 default: ShouldNotReachHere();
1819 }
1820 } else if (right->is_stack()) {
1821 // added support for stack operands
1822 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1823 switch (code) {
1824 case lir_logic_and: __ andl (reg, raddr); break;
1825 case lir_logic_or: __ orl (reg, raddr); break;
1826 case lir_logic_xor: __ xorl (reg, raddr); break;
1827 default: ShouldNotReachHere();
1828 }
1829 } else {
1830 Register rright = right->as_register();
1831 switch (code) {
1832 case lir_logic_and: __ andptr (reg, rright); break;
1833 case lir_logic_or : __ orptr (reg, rright); break;
1834 case lir_logic_xor: __ xorptr (reg, rright); break;
1835 default: ShouldNotReachHere();
1836 }
1837 }
1838 move_regs(reg, dst->as_register());
1839 } else {
1840 Register l_lo = left->as_register_lo();
1841 Register l_hi = left->as_register_hi();
1842 if (right->is_constant()) {
1843 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
1844 switch (code) {
1845 case lir_logic_and:
1846 __ andq(l_lo, rscratch1);
1847 break;
1848 case lir_logic_or:
1849 __ orq(l_lo, rscratch1);
1850 break;
1851 case lir_logic_xor:
1852 __ xorq(l_lo, rscratch1);
1853 break;
1854 default: ShouldNotReachHere();
1855 }
1856 } else {
1857 Register r_lo;
1858 if (is_reference_type(right->type())) {
1859 r_lo = right->as_register();
1860 } else {
1861 r_lo = right->as_register_lo();
1862 }
1863 switch (code) {
1864 case lir_logic_and:
1865 __ andptr(l_lo, r_lo);
1866 break;
1867 case lir_logic_or:
1868 __ orptr(l_lo, r_lo);
1869 break;
1870 case lir_logic_xor:
1871 __ xorptr(l_lo, r_lo);
1872 break;
1873 default: ShouldNotReachHere();
1874 }
1875 }
1876
1877 Register dst_lo = dst->as_register_lo();
1878 Register dst_hi = dst->as_register_hi();
1879
1880 move_regs(l_lo, dst_lo);
1881 }
1882 }
1883
1884
// we assume that rax, and rdx can be overwritten
// Emit 32-bit integer division (lir_idiv) or remainder (lir_irem).
// A constant divisor must be a positive power of two (asserted) and is
// handled with shift/mask sequences that round toward zero like Java's
// '/' and '%'; otherwise the hardware idiv is used via corrected_idivl.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  // assert(left->destroys_register(), "check");
  // assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      if (divisor == 2) {
        // rdx is 0 for non-negative lreg and -1 for negative lreg; the
        // subtraction adds the +1 bias negative values need so the shift
        // rounds toward zero.
        __ subl(lreg, rdx);
      } else {
        // Add (divisor - 1) to negative values only (the mask extracts the
        // bias from the sign extension in rdx) so the arithmetic shift
        // rounds toward zero instead of toward negative infinity.
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2i_exact(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      Label done;
      __ mov(dreg, lreg);
      // Keep the sign bit plus the low log2(divisor) remainder bits.
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      // Negative dividend: sign-extend the remainder bits; the dec/or/inc
      // sequence also maps a zero remainder (masked value 0x80000000) to 0.
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    // corrected_idivl returns the code offset of the idiv instruction
    // itself, where a division-by-zero fault would occur (presumably it
    // also corrects the min_int / -1 case -- see MacroAssembler).
    int idivl_offset = __ corrected_idivl(rreg);
    if (ImplicitDiv0Checks) {
      // Record debug info so a SIGFPE at the idiv is recognized as an
      // implicit divide-by-zero check.
      add_debug_info_for_div0(idivl_offset, info);
    }
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
1944
1945
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  // Emits a compare of opr1 against opr2 that only sets the condition
  // flags for a subsequent branch/cmove; no result register is written.
  // Dispatches on every supported operand-kind pairing (cpu register,
  // stack slot, constant, memory address, single/double xmm register).
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        // oop compare must go through cmpoop (handles compressed oops)
        __ cmpoop(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        jint i = c->as_jint();
        if (i == 0) {
          // test reg,reg is a shorter encoding than cmp reg,0
          __ testl(reg1, reg1);
        } else {
          __ cmpl(reg1, i);
        }
      } else if (c->type() == T_METADATA) {
        // All we need for now is a comparison with null for equality.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          __ cmpoop(reg1, o, rscratch1);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != nullptr) {
        // the memory operand may fault; record debug info for an
        // implicit null check at this pc
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      // 64-bit register - register compare
      __ cmpptr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant (materialized in the constant table)
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
    if (is_reference_type(c->type())) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      // load the oop constant into the scratch register first, before
      // the (possibly faulting) memory access below
      __ movoop(rscratch1, c->as_jobject());
    }
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (is_reference_type(c->type())) {
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
2084
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  // Implements the three-way compares that produce an int result:
  // fcmpl/fcmpg, dcmpl/dcmpg (lir_cmp_fd2i / lir_ucmp_fd2i, the two
  // variants differ in NaN handling) and lcmp (lir_cmp_l2i).
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      ShouldNotReachHere();
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    // long compare: dest = -1 if left < right, 0 if equal, 1 if greater
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);                    // assume less
    __ jccb(Assembler::less, done);
    __ setb(Assembler::notZero, dest);    // 0 if equal, 1 if not
    __ movzbl(dest, dest);                // setb only writes the low byte
    __ bind(done);
  }
}
2109
2110
void LIR_Assembler::align_call(LIR_Code code) {
  // make sure that the displacement word of the call ends up word aligned
  // (so the call site can be patched atomically); for inline-cache calls
  // the preceding NativeMovConstReg (IC holder load) is part of the site
  int offset = __ offset();
  switch (code) {
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::displacement_offset;
      break;
    case lir_icvirtual_call:
      offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
      break;
    default: ShouldNotReachHere();
  }
  __ align(BytesPerWord, offset);
}
2127
2128
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  // Emits a direct Java call; align_call() must already have padded the
  // code so the displacement word is word aligned (patchable).
  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  // record oop map / debug info at the return address
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
2136
2137
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  // Emits an inline-cache virtual call (IC holder load + call).
  __ ic_call(op->addr());
  // record oop map / debug info at the return address
  add_call_info(code_offset(), op->info());
  // verify that align_call() left the displacement word aligned
  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ post_call_nop();
}
2145
2146
void LIR_Assembler::emit_static_call_stub() {
  // Emits the out-of-line stub for a static call site: a patchable
  // Method* load into rbx followed by a jump. The stub is found via the
  // static_stub_Relocation that records the pc of the call it belongs to.
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    // stub section is full; compilation is abandoned
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // make sure that the displacement word of the call ends up word aligned
  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // placeholder Method*; patched in when the call is resolved
  __ mov_metadata(rbx, (Metadata*)nullptr);
  // must be set to -1 at code generation time
  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2169
2170
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  // Emits an athrow: materializes the throwing pc, then calls into the
  // runtime exception handler lookup with the fixed register contract
  // rax = exception oop, rdx = throwing pc.
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  // the nofpu variant skips FPU state save/restore when the method
  // contains no FPU code
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}
2199
2200
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // No handler in this method: jump to the shared unwind handler emitted
  // elsewhere in this method's code, with the exception oop in rax.
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
2206
2207
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // Variable-count shift: hardware requires the count in CL, so the
  // register allocator pins it there (SHIFT_count).

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    // 32-bit shift
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;   // <<
      case lir_shr:  __ sarl(value); break;   // >> (arithmetic)
      case lir_ushr: __ shrl(value); break;   // >>> (logical)
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    // 64-bit shift
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2242
2243
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  // Constant-count shift: the count is an immediate, so no CL constraint;
  // left and dest may differ here.
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec
    // NOTE(review): 0x1F masking is also applied on the 64-bit path below;
    // presumably callers never pass a long-shift count >= 32 here — the
    // hardware masks 64-bit shift counts to 6 bits anyway

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec
    // see note above regarding the mask width

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
2273
2274
// Spills register r into the reserved outgoing-argument area on the
// stack, at word slot offset_from_rsp_in_words.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}
2281
2282
// Stores the int constant c into the reserved outgoing-argument area on
// the stack, at word slot offset_from_rsp_in_words.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}
2289
2290
// Stores the oop constant o (with relocation, via rscratch1) into the
// reserved outgoing-argument area at word slot offset_from_rsp_in_words.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
}
2297
2298
// Stores the Metadata* constant m (with relocation, via rscratch1) into
// the reserved outgoing-argument area at word slot offset_from_rsp_in_words.
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
}
2305
2306
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  // Inline expansion of System.arraycopy. Depending on how much the
  // compiler could prove statically (op->flags()), this emits only the
  // runtime checks that are still required, then tail-calls the matching
  // arraycopy stub. op->stub() is the slow path that performs the copy
  // via the real System.arraycopy semantics (and throws the exceptions).
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = rscratch1;
  Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64

    // stub returns 0 in rax on success; done in that case
    __ testl(rax, rax);
    __ jcc(Assembler::equal, *stub->continuation());

    // partial failure: the subl/addl arithmetic below treats ~rax as the
    // number of elements already copied, advancing the positions and
    // shrinking the length before re-trying via the slow path
    __ mov(tmp, rax);
    __ xorl(tmp, -1);

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr   (src,     Address(rsp, 4*BytesPerWord));

    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  // scale factor for addressing array elements of this type
  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst, tmp_load_klass);
      // layout helper is < _lh_neutral_value for arrays
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  // bounds checks: pos + length must not exceed the array length
  // (unsigned compare also catches wrapped sums)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  __ movl2ptr(src_pos, src_pos); //higher 32bits must be zero
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be zero

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // preserve src/dst across the subtype check (load_klass clobbers them)
      __ push_ppx(src);
      __ push_ppx(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      // fast path inconclusive: call the full subtype check
      __ push_ppx(src);
      __ push_ppx(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(dst);
      __ pop_ppx(src);

      __ testl(src, src);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop_ppx(dst);
      __ pop_ppx(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of the two is known to be an object array; verify at
          // runtime that the other one is an object array too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

        __ movl2ptr(length, length); //higher 32bits must be zero

        // c_rarg0 = source element address, c_rarg1 = destination element
        // address, c_rarg2 = element count
        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        // pass the destination element klass and its super-check offset
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        // rax is 0 if the whole copy succeeded
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        // partial copy: ~rax elements were copied (see generic path above);
        // adjust positions/length before falling into the slow path
        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));


        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    __ encode_klass_not_null(tmp, rscratch1);

    if (basic_type != T_OBJECT) {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::notEqual, halt);
      __ cmp_klass(tmp, src, tmp2);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

  // all checks passed (or were proven statically): call the specialized
  // arraycopy stub for this element type / alignment / disjointness
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  if (stub != nullptr) {
    __ bind(*stub->continuation());
  }
}
2670
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  // Inline intrinsic for CRC32.update(int crc, int b): one table-driven
  // byte update. The CRC is kept bit-inverted inside update_byte_crc32,
  // hence the notl before and after.
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  // res temporarily holds the CRC table base address
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}
2687
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  // Emits a monitorenter (lir_lock) or monitorexit (lir_unlock) fast
  // path; op->stub() is the slow path taken on contention.
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (op->code() == lir_lock) {
    Register tmp = op->scratch_opr()->as_register();
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // slow path returns here
  __ bind(*op->stub()->continuation());
}
2707
void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  // Loads the Klass* of obj into result.
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    // the klass load may fault on a null obj; record debug info so the
    // implicit null check maps to a NullPointerException here
    add_debug_info_for_null_check_here(info);
  }

  __ load_klass(result, obj, rscratch1);
}
2719
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  // Emits MethodData profiling code for a call site: bumps the call
  // counter and, for virtual/interface calls, records the receiver type
  // in the MDO's type-profile rows.
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = rscratch1;

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // found the matching row: just bump its counter, no dispatch
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      // dynamic case: load the actual receiver klass for profiling
      __ load_klass(recv, recv, tmp_load_klass);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
2768
// Emit code for LIR_OpProfileType: record the runtime type of 'obj' in the
// MethodData type-profile slot addressed by op->mdp(). The slot holds a
// Klass* with flag bits (TypeEntries::null_seen, TypeEntries::type_unknown)
// folded into its low bits. op->current_klass() describes what the profile
// is already known to contain at compile time, which lets us skip updates
// that cannot change the slot anymore.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = rscratch1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();     // statically known exact type, or null
  intptr_t current_klass = op->current_klass(); // profile state recorded so far
  bool not_null = op->not_null();               // obj statically known to be non-null
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  // True when the profile slot is already known to hold exactly exact_klass.
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

#ifdef ASSERT
  if (obj == tmp) {
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  } else {
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
  }
#endif
  if (do_null) {
    // Null path: set the null_seen bit unless the profile already has it.
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
      // NOTE: the braces below are interleaved with the preprocessor
      // conditionals: debug builds use long-form jumps and add an explicit
      // null check on the statically-not-null path.
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    // Debug-only cross-check: the dynamic klass must match the static one.
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push_ppx(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop_ppx(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
        __ mov(rscratch1, tmp); // save original value before XOR
        // XOR with the slot: zero klass bits mean the same klass is already
        // recorded; the slot's flag bits survive in tmp.
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push_ppx(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop_ppx(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }
    }
  }
  __ bind(next);
}
2938
2939 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2940 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2941 }
2942
2943
// Align the target of a backward branch to a word boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
2947
2948
2949 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2950 if (left->is_single_cpu()) {
2951 __ negl(left->as_register());
2952 move_regs(left->as_register(), dest->as_register());
2953
2954 } else if (left->is_double_cpu()) {
2955 Register lo = left->as_register_lo();
2956 Register dst = dest->as_register_lo();
2957 __ movptr(dst, lo);
2958 __ negptr(dst);
2959
2960 } else if (dest->is_single_xmm()) {
2961 assert(!tmp->is_valid(), "do not need temporary");
2962 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
2963 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
2964 }
2965 __ xorps(dest->as_xmm_float_reg(),
2966 ExternalAddress((address)float_signflip_pool),
2967 rscratch1);
2968 } else if (dest->is_double_xmm()) {
2969 assert(!tmp->is_valid(), "do not need temporary");
2970 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
2971 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
2972 }
2973 __ xorpd(dest->as_xmm_double_reg(),
2974 ExternalAddress((address)double_signflip_pool),
2975 rscratch1);
2976 } else {
2977 ShouldNotReachHere();
2978 }
2979 }
2980
2981
2982 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2983 assert(src->is_address(), "must be an address");
2984 assert(dest->is_register(), "must be a register");
2985
2986 PatchingStub* patch = nullptr;
2987 if (patch_code != lir_patch_none) {
2988 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
2989 }
2990
2991 Register reg = dest->as_pointer_register();
2992 LIR_Address* addr = src->as_address_ptr();
2993 __ lea(reg, as_Address(addr));
2994
2995 if (patch != nullptr) {
2996 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
2997 }
2998 }
2999
3000
3001
// Direct call to a runtime entry point at 'dest'. Result and argument
// operands are pre-assigned by the caller/allocator, so nothing is moved
// here; 'tmp' is unused on x86.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != nullptr) {
    // Record debug info (oop map) for this call site.
    add_call_info_here(info);
  }
  __ post_call_nop();
}
3010
3011
3012 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3013 assert(type == T_LONG, "only for volatile long fields");
3014
3015 if (info != nullptr) {
3016 add_debug_info_for_null_check_here(info);
3017 }
3018
3019 if (src->is_double_xmm()) {
3020 if (dest->is_double_cpu()) {
3021 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3022 } else if (dest->is_double_stack()) {
3023 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3024 } else if (dest->is_address()) {
3025 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3026 } else {
3027 ShouldNotReachHere();
3028 }
3029
3030 } else if (dest->is_double_xmm()) {
3031 if (src->is_double_stack()) {
3032 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3033 } else if (src->is_address()) {
3034 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3035 } else {
3036 ShouldNotReachHere();
3037 }
3038
3039 } else {
3040 ShouldNotReachHere();
3041 }
3042 }
3043
#ifdef ASSERT
// Emit a run-time assertion into the generated code for LIR_OpAssert: when
// the asserted condition does not hold, either stop the VM with the assert
// message or hit a breakpoint, depending on op->halt().
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition to the x86 condition code and branch around the
    // failure path when the assertion holds.
    Assembler::Condition cond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        cond = Assembler::equal;        break;
      case lir_cond_notEqual:     cond = Assembler::notEqual;     break;
      case lir_cond_less:         cond = Assembler::less;         break;
      case lir_cond_lessEqual:    cond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: cond = Assembler::greaterEqual; break;
      case lir_cond_greater:      cond = Assembler::greater;      break;
      case lir_cond_belowEqual:   cond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   cond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(cond, ok);
  }
  if (op->halt()) {
    const char* msg = __ code_string(op->msg());
    __ stop(msg);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif
3082
// Full memory barrier. On x86 only a StoreLoad fence is emitted; the other
// orderings are provided by the hardware memory model.
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3087
// Acquire barrier: no instruction needed on x86.
void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}
3091
// Release barrier: no instruction needed on x86.
void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}
3095
// LoadLoad barrier: no instruction needed on x86.
void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
3100
// StoreStore barrier: no instruction needed on x86.
void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
3105
// LoadStore barrier: no instruction needed on x86.
void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
3110
// StoreLoad barrier: the only ordering x86 does not provide by itself, so an
// explicit fence is emitted.
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3114
3115 void LIR_Assembler::on_spin_wait() {
3116 __ pause ();
3117 }
3118
3119 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3120 assert(result_reg->is_register(), "check");
3121 __ mov(result_reg->as_register(), r15_thread);
3122 }
3123
3124
// Platform-specific peephole optimization hook; x86 implements none.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
3128
3129 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3130 assert(data == dest, "xchg/xadd uses only 2 operands");
3131
3132 if (data->type() == T_INT) {
3133 if (code == lir_xadd) {
3134 __ lock();
3135 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3136 } else {
3137 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3138 }
3139 } else if (data->is_oop()) {
3140 assert (code == lir_xchg, "xadd for oops");
3141 Register obj = data->as_register();
3142 if (UseCompressedOops) {
3143 __ encode_heap_oop(obj);
3144 __ xchgl(obj, as_Address(src->as_address_ptr()));
3145 __ decode_heap_oop(obj);
3146 } else {
3147 __ xchgptr(obj, as_Address(src->as_address_ptr()));
3148 }
3149 } else if (data->type() == T_LONG) {
3150 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3151 if (code == lir_xadd) {
3152 __ lock();
3153 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3154 } else {
3155 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3156 }
3157 } else {
3158 ShouldNotReachHere();
3159 }
3160 }
3161
3162 #undef __