1 /*
2 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "code/aotCodeCache.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "runtime/threadIdentifier.hpp"
46 #include "utilities/powerOfTwo.hpp"
47 #include "vmreg_x86.inline.hpp"
48
49
50 // These masks are used to provide 128-bit aligned bitmasks to the XMM
51 // instructions, to allow sign-masking or sign-bit flipping. They allow
52 // fast versions of NegF/NegD and AbsF/AbsD.
53
54 // Note: 'double' and 'long long' have 32-bits alignment on x86.
55 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
56 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
57 // of 128-bits operands for SSE instructions.
58 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
59 // Store the value to a 128-bits operand.
60 operand[0] = lo;
61 operand[1] = hi;
62 return operand;
63 }
64
// Buffer for 128-bits masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
// Each pointer is 16-byte aligned inside fp_signmask_pool (see
// double_quadword above); the extra 128-bit slot absorbs the alignment.
// signmask masks clear the sign bit (Abs); signflip masks toggle it (Neg).
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
73
#if INCLUDE_CDS
// publish external addresses defined in this file
// Registers the mask-pool addresses so AOT-compiled code that references
// them can be relocated when the code cache is loaded.
void LIR_Assembler::init_AOTAddressTable(GrowableArray<address>& external_addresses) {
#define ADD(addr) external_addresses.append((address)(addr));
  ADD(float_signmask_pool);
  ADD(double_signmask_pool);
  ADD(float_signflip_pool);
  ADD(double_signflip_pool);
#undef ADD
}
#endif // INCLUDE_CDS
85
NEEDS_CLEANUP // remove this definitions ?
// Register conventions used by this file.
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
89
90 #define __ _masm->
91
92 static void select_different_registers(Register preserve,
93 Register extra,
94 Register &tmp1,
95 Register &tmp2,
96 Register &tmp3) {
97 if (tmp1 == preserve) {
98 assert_different_registers(tmp1, tmp2, tmp3, extra);
99 tmp1 = extra;
100 } else if (tmp2 == preserve) {
101 assert_different_registers(tmp1, tmp2, tmp3, extra);
102 tmp2 = extra;
103 } else if (tmp3 == preserve) {
104 assert_different_registers(tmp1, tmp2, tmp3, extra);
105 tmp3 = extra;
106 }
107 assert_different_registers(preserve, tmp1, tmp2, tmp3);
108 }
109
110
111
112 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
113 if (opr->is_constant()) {
114 LIR_Const* constant = opr->as_constant_ptr();
115 switch (constant->type()) {
116 case T_INT: {
117 return true;
118 }
119
120 default:
121 return false;
122 }
123 }
124 return false;
125 }
126
127
// Operand holding the receiver (this) on entry, per the C1 calling convention.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
131
// The OSR buffer arrives in the same register as the receiver; return it
// typed as a pointer operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
135
136 //--------------fpu register translations-----------------------
137
138
139 address LIR_Assembler::float_constant(float f) {
140 address const_addr = __ float_constant(f);
141 if (const_addr == nullptr) {
142 bailout("const section overflow");
143 return __ code()->consts()->start();
144 } else {
145 return const_addr;
146 }
147 }
148
149
150 address LIR_Assembler::double_constant(double d) {
151 address const_addr = __ double_constant(d);
152 if (const_addr == nullptr) {
153 bailout("const section overflow");
154 return __ code()->consts()->start();
155 } else {
156 return const_addr;
157 }
158 }
159
// Emits a hardware breakpoint (INT3) for debugging.
void LIR_Assembler::breakpoint() {
  __ int3();
}
163
164 void LIR_Assembler::push(LIR_Opr opr) {
165 if (opr->is_single_cpu()) {
166 __ push_reg(opr->as_register());
167 } else if (opr->is_double_cpu()) {
168 __ push_reg(opr->as_register_lo());
169 } else if (opr->is_stack()) {
170 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
171 } else if (opr->is_constant()) {
172 LIR_Const* const_opr = opr->as_constant_ptr();
173 if (const_opr->type() == T_OBJECT) {
174 __ push_oop(const_opr->as_jobject(), rscratch1);
175 } else if (const_opr->type() == T_INT) {
176 __ push_jint(const_opr->as_jint());
177 } else {
178 ShouldNotReachHere();
179 }
180
181 } else {
182 ShouldNotReachHere();
183 }
184 }
185
186 void LIR_Assembler::pop(LIR_Opr opr) {
187 if (opr->is_single_cpu()) {
188 __ pop_reg(opr->as_register());
189 } else {
190 ShouldNotReachHere();
191 }
192 }
193
// An address is "literal" when it has neither base nor index register,
// i.e. the displacement alone is the absolute address.
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}
197
198 //-------------------------------------------
199
// Converts a LIR_Address to an assembler Address, using rscratch1 as the
// temp register for unreachable literal addresses (see overload below).
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
203
// Converts a LIR_Address to an assembler Address. For a literal address
// that is not RIP-reachable this materializes the address into 'tmp'
// (emitting a movptr), so callers must not rely on 'tmp' being preserved.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      // Too far for a RIP-relative form: load the absolute address
      // into the temp register and address through it.
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    // Fold a constant index into the displacement; it must still fit
    // in the 32-bit displacement field of the instruction.
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
234
235
// Address of the high word of a two-word operand: same as the low-word
// address plus one machine word.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}
240
241
// Address of the low word of a two-word operand; identical to as_Address.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
245
246
// Emits the on-stack-replacement entry point: builds the compiled frame
// and copies the interpreter's monitors out of the OSR buffer into the
// compiled activation. Locals are copied later by IR-emitted code.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the first (innermost) monitor in the OSR buffer.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      // Copy lock word and object oop into this frame's monitor slot i,
      // going through rbx since there is no memory-to-memory move.
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
310
311
312 // inline cache check; done before the frame is built.
// inline cache check; done before the frame is built.
// Returns the offset of the verified entry point.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
316
// Emits a class-initialization barrier: if the method holder is not yet
// fully initialized (for the current thread), jump to the
// handle_wrong_method stub so the call is re-resolved.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

  // Slow path: let the runtime sort out the (re-)dispatch.
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
331
// Loads an (initially null) oop constant into 'reg' under a patching stub;
// the real oop is filled in when the patch site is resolved.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
338
// Loads an (initially null) klass constant into 'reg' under a patching
// stub; the real metadata is filled in when the patch site is resolved.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
345
346 // This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  // (those two words are pushed by the call/frame setup, not by this
  // decrement).
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
355
356
// Emits the method's exception handler stub; returns its code offset,
// or -1 if the stub section overflowed (compilation bails out).
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  // The runtime call does not return here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
383
384
385 // Emit the code to remove the frame from the stack in the exception
386 // unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    // Monitor slot 0 holds the method monitor of a synchronized method.
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // Arguments for SharedRuntime::dtrace_method_exit(thread, method).
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
437
438
// Emits the method's deoptimization handler stub; returns the offset of
// its entry point, or -1 if the stub section overflowed.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for the deopt handler (NOTE: the original comment said
  // "exception handler"; this stub calls the deopt blob's unpack entry)
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // Entry is the jmp back to the call, so the call's return address
  // points into this stub.
  int entry_offset = __ offset();

  __ jmp(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}
466
// Emits the method epilogue: removes the frame, performs the return poll
// (branching to 'code_stub' if a safepoint is pending) and returns.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

  // Record where the poll instruction lands so the stub can compute the
  // return pc of this frame.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}
488
489
// Emits an explicit (non-return) safepoint poll with debug info attached;
// returns the code offset of the poll sequence.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  // The poll read itself; its encoding must be exactly 3 bytes (checked
  // below) so the VM can recognize the instruction at the poll pc.
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
  return offset;
}
503
504
505 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
506 if (from_reg != to_reg) __ mov(to_reg, from_reg);
507 }
508
// Exchanges the contents of two registers.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
512
513
514 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
515 assert(src->is_constant(), "should not call otherwise");
516 assert(dest->is_register(), "should not call otherwise");
517 LIR_Const* c = src->as_constant_ptr();
518
519 switch (c->type()) {
520 case T_INT: {
521 assert(patch_code == lir_patch_none, "no patching handled here");
522 __ movl(dest->as_register(), c->as_jint());
523 break;
524 }
525
526 case T_ADDRESS: {
527 assert(patch_code == lir_patch_none, "no patching handled here");
528 __ movptr(dest->as_register(), c->as_jint());
529 break;
530 }
531
532 case T_LONG: {
533 #if INCLUDE_CDS
534 if (AOTCodeCache::is_on_for_dump()) {
535 address b = c->as_pointer();
536 if (b == (address)ThreadIdentifier::unsafe_offset()) {
537 __ lea(dest->as_register_lo(), ExternalAddress(b));
538 break;
539 }
540 }
541 #endif
542 assert(patch_code == lir_patch_none, "no patching handled here");
543 #if INCLUDE_CDS
544 if (AOTCodeCache::is_on_for_dump()) {
545 address b = c->as_pointer();
546 if (b == (address)ThreadIdentifier::unsafe_offset()) {
547 __ lea(dest->as_register_lo(), ExternalAddress(b));
548 break;
549 }
550 if (AOTRuntimeConstants::contains(b)) {
551 __ load_aotrc_address(dest->as_register_lo(), b);
552 break;
553 }
554 }
555 #endif
556 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
557 break;
558 }
559
560 case T_OBJECT: {
561 if (patch_code != lir_patch_none) {
562 jobject2reg_with_patching(dest->as_register(), info);
563 } else {
564 __ movoop(dest->as_register(), c->as_jobject());
565 }
566 break;
567 }
568
569 case T_METADATA: {
570 if (patch_code != lir_patch_none) {
571 klass2reg_with_patching(dest->as_register(), info);
572 } else {
573 __ mov_metadata(dest->as_register(), c->as_metadata());
574 }
575 break;
576 }
577
578 case T_FLOAT: {
579 if (dest->is_single_xmm()) {
580 if (UseAVX <= 2 && c->is_zero_float()) {
581 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
582 } else {
583 __ movflt(dest->as_xmm_float_reg(),
584 InternalAddress(float_constant(c->as_jfloat())));
585 }
586 } else {
587 ShouldNotReachHere();
588 }
589 break;
590 }
591
592 case T_DOUBLE: {
593 if (dest->is_double_xmm()) {
594 if (UseAVX <= 2 && c->is_zero_double()) {
595 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
596 } else {
597 __ movdbl(dest->as_xmm_double_reg(),
598 InternalAddress(double_constant(c->as_jdouble())));
599 }
600 } else {
601 ShouldNotReachHere();
602 }
603 break;
604 }
605
606 default:
607 ShouldNotReachHere();
608 }
609 }
610
// Stores a constant into a stack slot of the current frame.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      // Both are 32-bit bit patterns; store them with a plain movl.
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      // 64-bit bit patterns go to the low word of the double stack slot.
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
      break;

    default:
      ShouldNotReachHere();
  }
}
642
// Stores a constant to memory. 'wide' forces an uncompressed oop store.
// If 'info' is non-null, debug info for an implicit null check is attached
// to the instruction that can fault (tracked via null_check_here).
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  // Offset of the instruction that dereferences the base; updated below
  // whenever a preparatory instruction is emitted before the actual store.
  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT: // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          // Narrow null is the all-zero bit pattern: a 32-bit store works.
          __ movl(as_Address(addr), NULL_WORD);
        } else {
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
          // Materialize the oop in a scratch register first, then store.
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        // 64-bit immediate stores need a register intermediary.
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
718
719
// Moves a value between registers (cpu<->cpu or xmm<->xmm).
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    // On x86_64 a double-cpu operand occupies a single 64-bit register,
    // so lo and hi must name the same register.
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
765
// Spills a register to a stack slot of the current frame.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    // A single 64-bit store covers the whole double slot on x86_64.
    __ movptr (dstLO, src->as_register_lo());

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
798
799
// Stores a register value to memory. 'wide' forces an uncompressed oop
// store; 'patch_code' wraps the access in a field-patching stub; 'info'
// attaches implicit-null-check debug info to the faulting instruction.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
    if (UseCompressedOops && !wide) {
      // Compress the oop into a scratch register so the original value
      // stays live (it may still be needed by debug info / GC maps).
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      break;
    }

    case T_DOUBLE: {
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      // One 64-bit store covers the whole long on x86_64.
      __ movptr(as_Address_lo(to_addr), from_lo);
      break;
    }

    case T_BYTE: // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR: // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
883
884
// Reloads a stack slot of the current frame into a register.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    // One 64-bit load covers the whole double slot on x86_64.
    __ movptr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}
916
917
// Copies one stack slot to another within the current frame.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      // push/pop gives a memory-to-memory move without clobbering a
      // general-purpose register.
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
    }

  } else if (src->is_double_stack()) {
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));

  } else {
    ShouldNotReachHere();
  }
}
937
938
939 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
940 assert(src->is_address(), "should not call otherwise");
941 assert(dest->is_register(), "should not call otherwise");
942
943 LIR_Address* addr = src->as_address_ptr();
944 Address from_addr = as_Address(addr);
945
946 if (addr->base()->type() == T_OBJECT) {
947 __ verify_oop(addr->base()->as_pointer_register());
948 }
949
950 switch (type) {
951 case T_BOOLEAN: // fall through
952 case T_BYTE: // fall through
953 case T_CHAR: // fall through
954 case T_SHORT:
955 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
956 // on pre P6 processors we may get partial register stalls
957 // so blow away the value of to_rinfo before loading a
958 // partial word into it. Do it here so that it precedes
959 // the potential patch point below.
960 __ xorptr(dest->as_register(), dest->as_register());
961 }
962 break;
963 default:
964 break;
965 }
966
967 PatchingStub* patch = nullptr;
968 if (patch_code != lir_patch_none) {
969 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
970 assert(from_addr.disp() != 0, "must have");
971 }
972 if (info != nullptr) {
973 add_debug_info_for_null_check_here(info);
974 }
975
976 switch (type) {
977 case T_FLOAT: {
978 if (dest->is_single_xmm()) {
979 __ movflt(dest->as_xmm_float_reg(), from_addr);
980 } else {
981 ShouldNotReachHere();
982 }
983 break;
984 }
985
986 case T_DOUBLE: {
987 if (dest->is_double_xmm()) {
988 __ movdbl(dest->as_xmm_double_reg(), from_addr);
989 } else {
990 ShouldNotReachHere();
991 }
992 break;
993 }
994
995 case T_OBJECT: // fall through
996 case T_ARRAY: // fall through
997 if (UseCompressedOops && !wide) {
998 __ movl(dest->as_register(), from_addr);
999 } else {
1000 __ movptr(dest->as_register(), from_addr);
1001 }
1002 break;
1003
1004 case T_ADDRESS:
1005 __ movptr(dest->as_register(), from_addr);
1006 break;
1007 case T_INT:
1008 __ movl(dest->as_register(), from_addr);
1009 break;
1010
1011 case T_LONG: {
1012 Register to_lo = dest->as_register_lo();
1013 Register to_hi = dest->as_register_hi();
1014 __ movptr(to_lo, as_Address_lo(addr));
1015 break;
1016 }
1017
1018 case T_BOOLEAN: // fall through
1019 case T_BYTE: {
1020 Register dest_reg = dest->as_register();
1021 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1022 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1023 __ movsbl(dest_reg, from_addr);
1024 } else {
1025 __ movb(dest_reg, from_addr);
1026 __ shll(dest_reg, 24);
1027 __ sarl(dest_reg, 24);
1028 }
1029 break;
1030 }
1031
1032 case T_CHAR: {
1033 Register dest_reg = dest->as_register();
1034 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1035 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1036 __ movzwl(dest_reg, from_addr);
1037 } else {
1038 __ movw(dest_reg, from_addr);
1039 }
1040 break;
1041 }
1042
1043 case T_SHORT: {
1044 Register dest_reg = dest->as_register();
1045 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1046 __ movswl(dest_reg, from_addr);
1047 } else {
1048 __ movw(dest_reg, from_addr);
1049 __ shll(dest_reg, 16);
1050 __ sarl(dest_reg, 16);
1051 }
1052 break;
1053 }
1054
1055 default:
1056 ShouldNotReachHere();
1057 }
1058
1059 if (patch != nullptr) {
1060 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1061 }
1062
1063 if (is_reference_type(type)) {
1064 if (UseCompressedOops && !wide) {
1065 __ decode_heap_oop(dest->as_register());
1066 }
1067
1068 __ verify_oop(dest->as_register());
1069 }
1070 }
1071
1072
1073 NEEDS_CLEANUP; // This could be static?
1074 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1075 int elem_size = type2aelembytes(type);
1076 switch (elem_size) {
1077 case 1: return Address::times_1;
1078 case 2: return Address::times_2;
1079 case 4: return Address::times_4;
1080 case 8: return Address::times_8;
1081 }
1082 ShouldNotReachHere();
1083 return Address::no_scale;
1084 }
1085
1086
// Emit code for three-operand LIR ops: integer divide/remainder and the
// fused multiply-add intrinsics.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      // op->info() carries the debug info for the implicit divide-by-zero check.
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      // double-precision fused multiply-add (all operands in XMM registers)
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      // single-precision fused multiply-add
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}
1113
// Emit a conditional or unconditional branch to op->label(). Floating-point
// branches additionally jump to the "unordered" successor (op->ublock()) when
// the preceding FP compare saw a NaN operand.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr) _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    // Unconditional jump; may carry debug info for this branch site.
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      // comiss/comisd set PF on an unordered compare (NaN operand), so jump
      // to the unordered successor on parity first.
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      // FP compares set the flags like an unsigned integer compare does, so
      // the signed less/greater conditions map to below/above here.
      switch(op->cond()) {
        case lir_cond_equal: acond = Assembler::equal; break;
        case lir_cond_notEqual: acond = Assembler::notEqual; break;
        case lir_cond_less: acond = Assembler::below; break;
        case lir_cond_lessEqual: acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater: acond = Assembler::above; break;
        default: ShouldNotReachHere();
      }
    } else {
      // Integer/pointer compare: use the signed conditions, plus the two
      // unsigned ones LIR exposes directly (belowEqual/aboveEqual).
      switch (op->cond()) {
        case lir_cond_equal: acond = Assembler::equal; break;
        case lir_cond_notEqual: acond = Assembler::notEqual; break;
        case lir_cond_less: acond = Assembler::less; break;
        case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater: acond = Assembler::greater; break;
        case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
        case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
        default: ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
1154
// Emit code for a primitive-type conversion bytecode. Integer widenings and
// narrowings are done with sign/zero extension moves; int/long <-> float/double
// use the SSE cvt* instructions; float/double -> int/long go through
// MacroAssembler convert_* helpers.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      // sign-extend 32-bit int into the 64-bit long register
      __ movl2ptr(dest->as_register_lo(), src->as_register());
      break;

    case Bytecodes::_l2i:
      // truncate: a 32-bit move keeps only the low word
      __ movl(dest->as_register(), src->as_register_lo());
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      // char is unsigned 16-bit: mask, don't sign-extend
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;

    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    // FP -> integer conversions are delegated to MacroAssembler helpers
    // (cvtt* alone does not implement Java semantics for NaN/overflow inputs;
    // the helpers are assumed to handle those cases).
    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;

    default: ShouldNotReachHere();
  }
}
1226
// Allocate an instance object. If the klass may not be initialized yet
// (op->init_check()), test its init_state first and branch to the slow-path
// stub unless it is fully_initialized.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // The cmpb below dereferences the klass register: record debug info for
    // the implicit null check at this PC.
    add_debug_info_for_null_check_here(op->stub()->info());
    // init_state needs acquire, but x86 is TSO, and so we are already good.
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
                    InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  // Fast-path TLAB/heap allocation; bails out to the stub entry on failure.
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  // Slow path re-enters here.
  __ bind(*op->stub()->continuation());
}
1245
// Allocate an array of op->type() with op->len() elements. The inline fast
// path is used only when the matching UseFastNew* flag is enabled; otherwise
// we jump straight to the runtime stub.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  // sign-extend the 32-bit length for use in 64-bit address arithmetic
  __ movslq(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Keep the temps passed to allocate_array distinct from len: if len
    // aliases tmp1 or tmp2, substitute tmp3 for that temp; otherwise copy
    // len into tmp3.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  // Slow path re-enters here.
  __ bind(*op->stub()->continuation());
}
1279
1280 void LIR_Assembler::type_profile_helper(Register mdo,
1281 ciMethodData *md, ciProfileData *data,
1282 Register recv) {
1283 int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1284 __ profile_receiver_type(recv, mdo, mdp_offset);
1285 }
1286
// Shared subtype-check emitter used by checkcast and instanceof. Tests
// whether op->object() is a subtype of op->klass() and branches to one of the
// three caller-supplied labels: `success`, `failure`, or `obj_is_null` (taken
// when the object is null). When profiling, also updates the MDO's null-seen
// flag and receiver-type rows.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = rscratch1;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  // Re-route temps that alias obj into dst so obj stays live across the check.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  Rtmp1 = op->tmp3()->as_register();
  select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  // Null check: sets ZF iff obj == 0.
  __ testptr(obj, obj);
  if (op->should_profile()) {
    Label not_null;
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
    int header_bits = BitData::null_seen_byte_constant();
    __ orb(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);

    // Non-null: record the receiver klass in the profile before checking.
    Register recv = k_RInfo;
    __ load_klass(recv, obj, tmp_load_klass);
    type_profile_helper(mdo, md, data, recv);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  // Load the expected klass into k_RInfo (with patching if unresolved).
  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Exact-klass check suffices (e.g. final classes): a plain pointer compare.
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(Rtmp1, obj, tmp_load_klass);
    __ cmpptr(k_RInfo, Rtmp1);
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj, tmp_load_klass);
    if (k->is_loaded()) {
      // Check the slot at k's statically-known super_check_offset.
      // See if we get an immediate positive hit
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // The offset points into the primary supers array: a miss is final.
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // The offset is the secondary-super cache: a miss is inconclusive,
        // so fall back to the out-of-line slow subtype check.
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
        __ cmpptr(klass_RInfo, k_RInfo);
        __ jcc(Assembler::equal, *success_target);

        // Stack-based calling convention for the slow stub: push sub then
        // super klass; the stub leaves its boolean result in the first-pushed
        // slot, so after popping both, klass_RInfo holds the result.
        __ push_ppx(klass_RInfo);
        __ push_ppx(k_RInfo);
        __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
        __ pop_ppx(klass_RInfo);
        __ pop_ppx(klass_RInfo);
        // result is a boolean
        __ testl(klass_RInfo, klass_RInfo);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // Klass not loaded at compile time: full dynamic check.
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push_ppx(klass_RInfo);
      __ push_ppx(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(klass_RInfo);
      __ pop_ppx(k_RInfo);
      // result is a boolean
      __ testl(k_RInfo, k_RInfo);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ jmp(*success);
}
1403
1404
// Dispatch for the three LIR type-check ops:
//  - lir_store_check: verify that a value stored into an object array is a
//    subtype of the array's element klass (aastore check), done inline here;
//  - lir_checkcast / lir_instanceof: delegate to emit_typecheck_helper and
//    materialize the result.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  Register tmp_load_klass = rscratch1;
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = nullptr;
    ciProfileData* data = nullptr;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    // Null check: storing null always passes the store check.
    __ testptr(value, value);
    if (op->should_profile()) {
      Label not_null;
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);

      // Non-null: record the value's klass in the profile.
      Register recv = k_RInfo;
      __ load_klass(recv, value, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv);
    } else {
      __ jcc(Assembler::equal, done);
    }

    // The array klass load below may implicitly null-check the array.
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array, tmp_load_klass);
    __ load_klass(klass_RInfo, value, tmp_load_klass);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    // push sub then super klass; the stub leaves its boolean result in the
    // first-pushed slot, which the second pop lands in k_RInfo.
    __ push_ppx(klass_RInfo);
    __ push_ppx(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    __ pop_ppx(klass_RInfo);
    __ pop_ppx(k_RInfo);
    // result is a boolean
    __ testl(k_RInfo, k_RInfo);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      // checkcast: on success (or null) the object itself is the result.
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        // instanceof: materialize 1 on success, 0 on failure or null.
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}
1502
1503
// Emit an atomic compare-and-swap. cmpxchg implicitly uses rax as the
// expected value, so the LIR generator must have placed cmp_value there
// (asserted below); the success flag is left in ZF for a following branch or
// setcc. Note cmpval (rax) is clobbered in the compressed-oop path.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        // Encode both oops first; use rscratch1 so newval is not clobbered.
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        __ lock();
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else {
        __ lock();
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      __ lock();
      __ cmpxchgl(newval, Address(addr, 0));
    }
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != noreg, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    __ lock();
    __ cmpxchgq(newval, Address(addr, 0));
  } else {
    Unimplemented();
  }
}
1547
// Conditional move: result = condition ? opr1 : opr2. Strategy: move opr1
// into result unconditionally, then overwrite with opr2 under the *negated*
// condition — via cmov when available (and opr2 is not a constant), otherwise
// via a short branch around the overwrite.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");

  // acond = condition, ncond = its negation (used to overwrite with opr2).
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
    case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
    case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
    case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
    case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
    case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
    default: acond = Assembler::equal; ncond = Assembler::notEqual;
             ShouldNotReachHere();
  }

  // Unconditionally place opr1 in result first. NB: these moves must not
  // change the flags set by the preceding compare.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
    } else {
      ShouldNotReachHere();
    }

  } else {
    // Fallback: keep opr1 by jumping over the opr2 move when acond holds.
    Label skip;
    __ jccb(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, nullptr);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}
1608
1609
1610 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1611 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1612
1613 if (left->is_single_cpu()) {
1614 assert(left == dest, "left and dest must be equal");
1615 Register lreg = left->as_register();
1616
1617 if (right->is_single_cpu()) {
1618 // cpu register - cpu register
1619 Register rreg = right->as_register();
1620 switch (code) {
1621 case lir_add: __ addl (lreg, rreg); break;
1622 case lir_sub: __ subl (lreg, rreg); break;
1623 case lir_mul: __ imull(lreg, rreg); break;
1624 default: ShouldNotReachHere();
1625 }
1626
1627 } else if (right->is_stack()) {
1628 // cpu register - stack
1629 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1630 switch (code) {
1631 case lir_add: __ addl(lreg, raddr); break;
1632 case lir_sub: __ subl(lreg, raddr); break;
1633 default: ShouldNotReachHere();
1634 }
1635
1636 } else if (right->is_constant()) {
1637 // cpu register - constant
1638 jint c = right->as_constant_ptr()->as_jint();
1639 switch (code) {
1640 case lir_add: {
1641 __ incrementl(lreg, c);
1642 break;
1643 }
1644 case lir_sub: {
1645 __ decrementl(lreg, c);
1646 break;
1647 }
1648 default: ShouldNotReachHere();
1649 }
1650
1651 } else {
1652 ShouldNotReachHere();
1653 }
1654
1655 } else if (left->is_double_cpu()) {
1656 assert(left == dest, "left and dest must be equal");
1657 Register lreg_lo = left->as_register_lo();
1658 Register lreg_hi = left->as_register_hi();
1659
1660 if (right->is_double_cpu()) {
1661 // cpu register - cpu register
1662 Register rreg_lo = right->as_register_lo();
1663 Register rreg_hi = right->as_register_hi();
1664 assert_different_registers(lreg_lo, rreg_lo);
1665 switch (code) {
1666 case lir_add:
1667 __ addptr(lreg_lo, rreg_lo);
1668 break;
1669 case lir_sub:
1670 __ subptr(lreg_lo, rreg_lo);
1671 break;
1672 case lir_mul:
1673 __ imulq(lreg_lo, rreg_lo);
1674 break;
1675 default:
1676 ShouldNotReachHere();
1677 }
1678
1679 } else if (right->is_constant()) {
1680 // cpu register - constant
1681 jlong c = right->as_constant_ptr()->as_jlong_bits();
1682 __ movptr(r10, (intptr_t) c);
1683 switch (code) {
1684 case lir_add:
1685 __ addptr(lreg_lo, r10);
1686 break;
1687 case lir_sub:
1688 __ subptr(lreg_lo, r10);
1689 break;
1690 default:
1691 ShouldNotReachHere();
1692 }
1693
1694 } else {
1695 ShouldNotReachHere();
1696 }
1697
1698 } else if (left->is_single_xmm()) {
1699 assert(left == dest, "left and dest must be equal");
1700 XMMRegister lreg = left->as_xmm_float_reg();
1701
1702 if (right->is_single_xmm()) {
1703 XMMRegister rreg = right->as_xmm_float_reg();
1704 switch (code) {
1705 case lir_add: __ addss(lreg, rreg); break;
1706 case lir_sub: __ subss(lreg, rreg); break;
1707 case lir_mul: __ mulss(lreg, rreg); break;
1708 case lir_div: __ divss(lreg, rreg); break;
1709 default: ShouldNotReachHere();
1710 }
1711 } else {
1712 Address raddr;
1713 if (right->is_single_stack()) {
1714 raddr = frame_map()->address_for_slot(right->single_stack_ix());
1715 } else if (right->is_constant()) {
1716 // hack for now
1717 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
1718 } else {
1719 ShouldNotReachHere();
1720 }
1721 switch (code) {
1722 case lir_add: __ addss(lreg, raddr); break;
1723 case lir_sub: __ subss(lreg, raddr); break;
1724 case lir_mul: __ mulss(lreg, raddr); break;
1725 case lir_div: __ divss(lreg, raddr); break;
1726 default: ShouldNotReachHere();
1727 }
1728 }
1729
1730 } else if (left->is_double_xmm()) {
1731 assert(left == dest, "left and dest must be equal");
1732
1733 XMMRegister lreg = left->as_xmm_double_reg();
1734 if (right->is_double_xmm()) {
1735 XMMRegister rreg = right->as_xmm_double_reg();
1736 switch (code) {
1737 case lir_add: __ addsd(lreg, rreg); break;
1738 case lir_sub: __ subsd(lreg, rreg); break;
1739 case lir_mul: __ mulsd(lreg, rreg); break;
1740 case lir_div: __ divsd(lreg, rreg); break;
1741 default: ShouldNotReachHere();
1742 }
1743 } else {
1744 Address raddr;
1745 if (right->is_double_stack()) {
1746 raddr = frame_map()->address_for_slot(right->double_stack_ix());
1747 } else if (right->is_constant()) {
1748 // hack for now
1749 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
1750 } else {
1751 ShouldNotReachHere();
1752 }
1753 switch (code) {
1754 case lir_add: __ addsd(lreg, raddr); break;
1755 case lir_sub: __ subsd(lreg, raddr); break;
1756 case lir_mul: __ mulsd(lreg, raddr); break;
1757 case lir_div: __ divsd(lreg, raddr); break;
1758 default: ShouldNotReachHere();
1759 }
1760 }
1761
1762 } else if (left->is_single_stack() || left->is_address()) {
1763 assert(left == dest, "left and dest must be equal");
1764
1765 Address laddr;
1766 if (left->is_single_stack()) {
1767 laddr = frame_map()->address_for_slot(left->single_stack_ix());
1768 } else if (left->is_address()) {
1769 laddr = as_Address(left->as_address_ptr());
1770 } else {
1771 ShouldNotReachHere();
1772 }
1773
1774 if (right->is_single_cpu()) {
1775 Register rreg = right->as_register();
1776 switch (code) {
1777 case lir_add: __ addl(laddr, rreg); break;
1778 case lir_sub: __ subl(laddr, rreg); break;
1779 default: ShouldNotReachHere();
1780 }
1781 } else if (right->is_constant()) {
1782 jint c = right->as_constant_ptr()->as_jint();
1783 switch (code) {
1784 case lir_add: {
1785 __ incrementl(laddr, c);
1786 break;
1787 }
1788 case lir_sub: {
1789 __ decrementl(laddr, c);
1790 break;
1791 }
1792 default: ShouldNotReachHere();
1793 }
1794 } else {
1795 ShouldNotReachHere();
1796 }
1797
1798 } else {
1799 ShouldNotReachHere();
1800 }
1801 }
1802
1803
// Emit code for single-operand intrinsics: double abs/sqrt on XMM registers
// and the float <-> half-float conversions.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          assert(!tmp->is_valid(), "do not need temporary");
          // abs(x): clear the sign bit by anding with the 128-bit-aligned
          // sign-mask constant pool entry.
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress((address)double_signmask_pool),
                   rscratch1);
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default : ShouldNotReachHere();
    }

  } else if (code == lir_f2hf) {
    // float -> half-float: result is an int in a GP register
    __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
  } else if (code == lir_hf2f) {
    // half-float (int in GP register) -> float in XMM
    __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
  } else {
    Unimplemented();
  }
}
1832
1833 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1834 // assert(left->destroys_register(), "check");
1835 if (left->is_single_cpu()) {
1836 Register reg = left->as_register();
1837 if (right->is_constant()) {
1838 int val = right->as_constant_ptr()->as_jint();
1839 switch (code) {
1840 case lir_logic_and: __ andl (reg, val); break;
1841 case lir_logic_or: __ orl (reg, val); break;
1842 case lir_logic_xor: __ xorl (reg, val); break;
1843 default: ShouldNotReachHere();
1844 }
1845 } else if (right->is_stack()) {
1846 // added support for stack operands
1847 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1848 switch (code) {
1849 case lir_logic_and: __ andl (reg, raddr); break;
1850 case lir_logic_or: __ orl (reg, raddr); break;
1851 case lir_logic_xor: __ xorl (reg, raddr); break;
1852 default: ShouldNotReachHere();
1853 }
1854 } else {
1855 Register rright = right->as_register();
1856 switch (code) {
1857 case lir_logic_and: __ andptr (reg, rright); break;
1858 case lir_logic_or : __ orptr (reg, rright); break;
1859 case lir_logic_xor: __ xorptr (reg, rright); break;
1860 default: ShouldNotReachHere();
1861 }
1862 }
1863 move_regs(reg, dst->as_register());
1864 } else {
1865 Register l_lo = left->as_register_lo();
1866 Register l_hi = left->as_register_hi();
1867 if (right->is_constant()) {
1868 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
1869 switch (code) {
1870 case lir_logic_and:
1871 __ andq(l_lo, rscratch1);
1872 break;
1873 case lir_logic_or:
1874 __ orq(l_lo, rscratch1);
1875 break;
1876 case lir_logic_xor:
1877 __ xorq(l_lo, rscratch1);
1878 break;
1879 default: ShouldNotReachHere();
1880 }
1881 } else {
1882 Register r_lo;
1883 if (is_reference_type(right->type())) {
1884 r_lo = right->as_register();
1885 } else {
1886 r_lo = right->as_register_lo();
1887 }
1888 switch (code) {
1889 case lir_logic_and:
1890 __ andptr(l_lo, r_lo);
1891 break;
1892 case lir_logic_or:
1893 __ orptr(l_lo, r_lo);
1894 break;
1895 case lir_logic_xor:
1896 __ xorptr(l_lo, r_lo);
1897 break;
1898 default: ShouldNotReachHere();
1899 }
1900 }
1901
1902 Register dst_lo = dst->as_register_lo();
1903 Register dst_hi = dst->as_register_hi();
1904
1905 move_regs(l_lo, dst_lo);
1906 }
1907 }
1908
1909
// we assume that rax, and rdx can be overwritten
// Emit 32-bit integer divide (lir_idiv) or remainder (lir_irem).
// Constant power-of-two divisors are strength-reduced to shifts/masks with
// the usual rounding-toward-zero correction for negative dividends; variable
// divisors use idiv with rax/rdx as the implicit dividend pair.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  // assert(left->destroys_register(), "check");
  // assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      // Bias negative dividends so the arithmetic shift rounds toward zero:
      // rdx is 0 for non-negative, -1 (all ones) for negative dividends.
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2i_exact(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      // dreg = lreg & (divisor-1), keeping the sign bit; if the masked value
      // came out negative, fold it back into the correct negative remainder.
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    // corrected_idivl handles the min_jint / -1 overflow case and returns the
    // offset of the idiv instruction for the implicit div-by-zero check.
    int idivl_offset = __ corrected_idivl(rreg);
    if (ImplicitDiv0Checks) {
      add_debug_info_for_div0(idivl_offset, info);
    }
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
1969
1970
// Emit a comparison of opr1 against opr2 that sets the x86 condition codes
// for a subsequent branch or conditional move. Dispatches on the operand
// kinds: GPR, long GPR pair, XMM (float/double), stack slot, constant and
// memory. When opr2 is a memory operand, the load may take an implicit
// null check, so debug info from op->info() is recorded first.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        jint i = c->as_jint();
        if (i == 0) {
          // test reg,reg is shorter than cmp reg,0 and sets the same flags
          __ testl(reg1, reg1);
        } else {
          __ cmpl(reg1, i);
        }
      } else if (c->type() == T_METADATA) {
        // All we need for now is a comparison with null for equality.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == nullptr) {
          __ testptr(reg1, reg1);
        } else {
          __ cmpoop(reg1, o, rscratch1);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    // Long comparison: on 64-bit the value lives entirely in the lo register.
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ cmpptr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    // Float comparison via ucomiss (unordered compare, sets ZF/PF/CF).
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    // Double comparison via ucomisd.
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != nullptr) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
    if (is_reference_type(c->type())) {
      // Materialize the oop constant before the possibly-faulting memory
      // access so the null-check PC recorded below is the compare itself.
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (is_reference_type(c->type())) {
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
2109
// Emit a three-way compare producing an int in dst: -1, 0 or 1.
// lir_cmp_fd2i / lir_ucmp_fd2i compare floats/doubles (the two codes differ
// in how an unordered result, i.e. NaN, is mapped: to 1 or -1 respectively,
// as required by fcmpg/fcmpl semantics). lir_cmp_l2i compares two longs.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      ShouldNotReachHere();
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    // Long compare: dest = -1 if left < right, 0 if equal, 1 if greater.
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);                    // assume less
    __ jccb(Assembler::less, done);
    __ setb(Assembler::notZero, dest);    // 1 if not equal, 0 if equal
    __ movzbl(dest, dest);                // zero-extend the byte result
    __ bind(done);
  }
}
2134
2135
2136 void LIR_Assembler::align_call(LIR_Code code) {
2137 // make sure that the displacement word of the call ends up word aligned
2138 int offset = __ offset();
2139 switch (code) {
2140 case lir_static_call:
2141 case lir_optvirtual_call:
2142 case lir_dynamic_call:
2143 offset += NativeCall::displacement_offset;
2144 break;
2145 case lir_icvirtual_call:
2146 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
2147 break;
2148 default: ShouldNotReachHere();
2149 }
2150 __ align(BytesPerWord, offset);
2151 }
2152
2153
// Emit a direct Java call. align_call() must have been used beforehand so
// that the call's displacement word is aligned (checked by the assert).
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  // Record oop map / debug info at the return address of the call.
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
2161
2162
// Emit an inline-cache (virtual/interface) call via MacroAssembler::ic_call.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  // Record oop map / debug info at the return address of the call.
  add_call_info(code_offset(), op->info());
  // Verify the displacement of the just-emitted call ended up word aligned
  // (guaranteed by align_call for lir_icvirtual_call).
  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ post_call_nop();
}
2170
2171
// Emit the out-of-line stub for a static call: a metadata move (placeholder
// for the callee Method*, patched at resolution time) followed by a jump
// (patched to the callee's entry). Both are laid out so their immediates
// can be patched atomically.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // make sure that the displacement word of the call ends up word aligned
  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
  // Associate the stub with the call site it belongs to.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)nullptr);
  // must be set to -1 at code generation time
  // (+1 skips the jmp opcode byte so the 4-byte displacement is aligned)
  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2194
2195
// Emit code for an athrow: capture the throwing pc, then call into the
// Runtime1 exception handler with the exception oop in rax and the pc in rdx.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  // The nofpu variant skips saving/restoring FPU state when the method
  // contains no FPU code.
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}
2224
2225
// Emit the unwind path for an exception that is not handled in this method:
// jump to the shared unwind handler with the exception oop in rax.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
2231
2232
2233 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2234
2235 // optimized version for linear scan:
2236 // * count must be already in ECX (guaranteed by LinearScan)
2237 // * left and dest must be equal
2238 // * tmp must be unused
2239 assert(count->as_register() == SHIFT_count, "count must be in ECX");
2240 assert(left == dest, "left and dest must be equal");
2241 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2242
2243 if (left->is_single_cpu()) {
2244 Register value = left->as_register();
2245 assert(value != SHIFT_count, "left cannot be ECX");
2246
2247 switch (code) {
2248 case lir_shl: __ shll(value); break;
2249 case lir_shr: __ sarl(value); break;
2250 case lir_ushr: __ shrl(value); break;
2251 default: ShouldNotReachHere();
2252 }
2253 } else if (left->is_double_cpu()) {
2254 Register lo = left->as_register_lo();
2255 Register hi = left->as_register_hi();
2256 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2257 switch (code) {
2258 case lir_shl: __ shlptr(lo); break;
2259 case lir_shr: __ sarptr(lo); break;
2260 case lir_ushr: __ shrptr(lo); break;
2261 default: ShouldNotReachHere();
2262 }
2263 } else {
2264 ShouldNotReachHere();
2265 }
2266 }
2267
2268
2269 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2270 if (dest->is_single_cpu()) {
2271 // first move left into dest so that left is not destroyed by the shift
2272 Register value = dest->as_register();
2273 count = count & 0x1F; // Java spec
2274
2275 move_regs(left->as_register(), value);
2276 switch (code) {
2277 case lir_shl: __ shll(value, count); break;
2278 case lir_shr: __ sarl(value, count); break;
2279 case lir_ushr: __ shrl(value, count); break;
2280 default: ShouldNotReachHere();
2281 }
2282 } else if (dest->is_double_cpu()) {
2283 // first move left into dest so that left is not destroyed by the shift
2284 Register value = dest->as_register_lo();
2285 count = count & 0x1F; // Java spec
2286
2287 move_regs(left->as_register_lo(), value);
2288 switch (code) {
2289 case lir_shl: __ shlptr(value, count); break;
2290 case lir_shr: __ sarptr(value, count); break;
2291 case lir_ushr: __ shrptr(value, count); break;
2292 default: ShouldNotReachHere();
2293 }
2294 } else {
2295 ShouldNotReachHere();
2296 }
2297 }
2298
2299
2300 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2301 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2302 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2303 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2304 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2305 }
2306
2307
2308 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2309 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2310 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2311 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2312 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2313 }
2314
2315
2316 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2317 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2318 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2319 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2320 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
2321 }
2322
2323
2324 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
2325 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2326 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2327 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2328 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
2329 }
2330
2331
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = rscratch1;
  Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
    }
#endif
    __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64

    // The generic stub returns 0 on success, or ~(elements copied) on
    // partial failure; fall through to the slow path for the remainder.
    __ testl(rax, rax);
    __ jcc(Assembler::equal, *stub->continuation());

    __ mov(tmp, rax);
    __ xorl(tmp, -1);

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr   (src,     Address(rsp, 4*BytesPerWord));

    // Advance src_pos/dst_pos past the elements already copied and shrink
    // length accordingly before taking the slow path.
    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      // Non-array klasses have a layout_helper >= _lh_neutral_value.
      __ load_klass(tmp, dst, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    // tmp = src_pos + length; unsigned compare against array length also
    // catches overflow.
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // Save src/dst across the subtype check (their registers are
      // clobbered by the klass loads below).
      __ push_ppx(src);
      __ push_ppx(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      // Fast path inconclusive: call the slow subtype check stub
      // (arguments passed on the stack, result returned in src).
      __ push_ppx(src);
      __ push_ppx(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(dst);
      __ pop_ppx(src);

      __ testl(src, src);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop_ppx(dst);
      __ pop_ppx(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

       // Spill because stubs can use any register they like and it's
       // easier to restore just those that we care about.
       store_parameter(dst, 0);
       store_parameter(dst_pos, 1);
       store_parameter(length, 2);
       store_parameter(src_pos, 3);
       store_parameter(src, 4);

        __ movl2ptr(length, length); //higher 32bits must be null

        // Set up the three C arguments: source element address, destination
        // element address, element count.
        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        // Pass the destination element klass and its super_check_offset so
        // the stub can type-check each copied element.
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        // rax == 0 means everything was copied; otherwise rax is
        // ~(elements copied) and the slow path finishes the rest.
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));


        // Advance past the elements the stub already copied.
        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class.  For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type.  For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    __ encode_klass_not_null(tmp, rscratch1);

    if (basic_type != T_OBJECT) {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::notEqual, halt);
      __ cmp_klass(tmp, src, tmp2);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

  // Fast path: call the best matching specialized arraycopy stub directly
  // with (src element addr, dst element addr, element count).
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  if (stub != nullptr) {
    __ bind(*stub->continuation());
  }
}
2695
2696 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2697 assert(op->crc()->is_single_cpu(), "crc must be register");
2698 assert(op->val()->is_single_cpu(), "byte value must be register");
2699 assert(op->result_opr()->is_single_cpu(), "result must be register");
2700 Register crc = op->crc()->as_register();
2701 Register val = op->val()->as_register();
2702 Register res = op->result_opr()->as_register();
2703
2704 assert_different_registers(val, crc, res);
2705
2706 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
2707 __ notl(crc); // ~crc
2708 __ update_byte_crc32(crc, val, res);
2709 __ notl(crc); // ~crc
2710 __ mov(res, crc);
2711 }
2712
// Emit the fast path for monitorenter/monitorexit; the slow path is taken
// via the op's stub and control rejoins at the stub's continuation label.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (op->code() == lir_lock) {
    Register tmp = op->scratch_opr()->as_register();
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
2732
2733 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2734 Register obj = op->obj()->as_pointer_register();
2735 Register result = op->result_opr()->as_pointer_register();
2736
2737 CodeEmitInfo* info = op->info();
2738 if (info != nullptr) {
2739 add_debug_info_for_null_check_here(info);
2740 }
2741
2742 __ load_klass(result, obj, rscratch1);
2743 }
2744
// Emit profiling code for a call site: bump the invocation counter in the
// MethodData, and for virtual/interface calls also record the receiver type
// (either statically when the receiver klass is known, or dynamically).
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = rscratch1;

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // The known klass already has a row; just bump its counter.
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      // Receiver type unknown at compile time: load it from the object.
      __ load_klass(recv, recv, tmp_load_klass);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
2793
// Emits speculative type-profiling code (LIR_OpProfileType): records the
// runtime type of op->obj() into the MethodData cell addressed by
// op->mdp(). The cell holds a Klass* combined with TypeEntries flag bits
// (null_seen, type_unknown); current_klass/exact_klass describe what the
// compiler already knows statically, which lets several of the runtime
// checks be elided.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = rscratch1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  // do_null: must handle a null obj at runtime (profile null_seen bit).
  // do_update: the recorded type can still change, so emit update code.
  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

#ifdef ASSERT
  if (obj == tmp) {
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  } else {
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
  }
#endif
  if (do_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    // NOTE: the brace structure below is completed differently in product
    // and debug builds — in debug builds a null obj that was statically
    // claimed not_null stops the VM instead of falling through.
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    // Debug build: verify the statically-known exact klass really matches
    // the object's runtime klass.
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push_ppx(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop_ppx(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
        __ mov(rscratch1, tmp); // save original value before XOR
        // XOR with the profile cell: a zero result (modulo flag bits)
        // means the same klass is already recorded.
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push_ppx(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop_ppx(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}
2963
2964 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2965 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2966 }
2967
2968
// Aligns the code buffer so that a backward branch target starts on a
// BytesPerWord boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
2972
2973
2974 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2975 if (left->is_single_cpu()) {
2976 __ negl(left->as_register());
2977 move_regs(left->as_register(), dest->as_register());
2978
2979 } else if (left->is_double_cpu()) {
2980 Register lo = left->as_register_lo();
2981 Register dst = dest->as_register_lo();
2982 __ movptr(dst, lo);
2983 __ negptr(dst);
2984
2985 } else if (dest->is_single_xmm()) {
2986 assert(!tmp->is_valid(), "do not need temporary");
2987 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
2988 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
2989 }
2990 __ xorps(dest->as_xmm_float_reg(),
2991 ExternalAddress((address)float_signflip_pool),
2992 rscratch1);
2993 } else if (dest->is_double_xmm()) {
2994 assert(!tmp->is_valid(), "do not need temporary");
2995 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
2996 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
2997 }
2998 __ xorpd(dest->as_xmm_double_reg(),
2999 ExternalAddress((address)double_signflip_pool),
3000 rscratch1);
3001 } else {
3002 ShouldNotReachHere();
3003 }
3004 }
3005
3006
3007 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3008 assert(src->is_address(), "must be an address");
3009 assert(dest->is_register(), "must be a register");
3010
3011 PatchingStub* patch = nullptr;
3012 if (patch_code != lir_patch_none) {
3013 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3014 }
3015
3016 Register reg = dest->as_pointer_register();
3017 LIR_Address* addr = src->as_address_ptr();
3018 __ lea(reg, as_Address(addr));
3019
3020 if (patch != nullptr) {
3021 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3022 }
3023 }
3024
3025
3026
3027 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3028 assert(!tmp->is_valid(), "don't need temporary");
3029 __ call(RuntimeAddress(dest));
3030 if (info != nullptr) {
3031 add_call_info_here(info);
3032 }
3033 __ post_call_nop();
3034 }
3035
3036
3037 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3038 assert(type == T_LONG, "only for volatile long fields");
3039
3040 if (info != nullptr) {
3041 add_debug_info_for_null_check_here(info);
3042 }
3043
3044 if (src->is_double_xmm()) {
3045 if (dest->is_double_cpu()) {
3046 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3047 } else if (dest->is_double_stack()) {
3048 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3049 } else if (dest->is_address()) {
3050 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3051 } else {
3052 ShouldNotReachHere();
3053 }
3054
3055 } else if (dest->is_double_xmm()) {
3056 if (src->is_double_stack()) {
3057 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3058 } else if (src->is_address()) {
3059 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3060 } else {
3061 ShouldNotReachHere();
3062 }
3063
3064 } else {
3065 ShouldNotReachHere();
3066 }
3067 }
3068
#ifdef ASSERT
// Emits a run-time assertion (debug builds only): compares the two input
// operands (unless the condition is lir_cond_always), then either stops
// the VM with op->msg() or hits a breakpoint when the condition fails.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  const LIR_Condition cond = op->condition();
  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(cond, op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(cond == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (cond != lir_cond_always) {
    // Map the LIR condition onto the x86 condition code and jump past
    // the failure handling when the assertion holds.
    Assembler::Condition acond;
    switch (cond) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    acond = Assembler::zero; ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }

  if (op->halt()) {
    // Keep the message in the code buffer so stop() can reference it.
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif
3107
// Full memory barrier. Under the x86 memory model only StoreLoad
// reordering can occur, so a StoreLoad fence suffices for a full membar.
void LIR_Assembler::membar() {
  // Only StoreLoad needs an actual fence on x86.
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3112
// Acquire barrier: no instruction needed on x86.
void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}
3116
// Release barrier: no instruction needed on x86.
void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}
3120
// LoadLoad barrier: no explicit fence needed on x86.
void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
3125
// StoreStore barrier: no explicit fence needed on x86.
void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
3130
// LoadStore barrier: no explicit fence needed on x86.
void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
3135
// StoreLoad barrier: the one ordering that requires an explicit fence
// on x86 (see the no-op barriers above).
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
3139
3140 void LIR_Assembler::on_spin_wait() {
3141 __ pause ();
3142 }
3143
3144 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3145 assert(result_reg->is_register(), "check");
3146 __ mov(result_reg->as_register(), r15_thread);
3147 }
3148
3149
// Platform-specific peephole optimization pass over the LIR list.
// Not implemented on x86.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
3153
3154 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3155 assert(data == dest, "xchg/xadd uses only 2 operands");
3156
3157 if (data->type() == T_INT) {
3158 if (code == lir_xadd) {
3159 __ lock();
3160 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3161 } else {
3162 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3163 }
3164 } else if (data->is_oop()) {
3165 assert (code == lir_xchg, "xadd for oops");
3166 Register obj = data->as_register();
3167 if (UseCompressedOops) {
3168 __ encode_heap_oop(obj);
3169 __ xchgl(obj, as_Address(src->as_address_ptr()));
3170 __ decode_heap_oop(obj);
3171 } else {
3172 __ xchgptr(obj, as_Address(src->as_address_ptr()));
3173 }
3174 } else if (data->type() == T_LONG) {
3175 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3176 if (code == lir_xadd) {
3177 __ lock();
3178 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3179 } else {
3180 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3181 }
3182 } else {
3183 ShouldNotReachHere();
3184 }
3185 }
3186
3187 #undef __