1 /*
2 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_x86.inline.hpp"
47
48
49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
50 // instructions, to allow sign-masking or sign-bit flipping. They allow
51 // fast versions of NegF/NegD and AbsF/AbsD.
52
53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
55 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
56 // for the 128-bit operands of SSE instructions.
57 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
58 // Store the value into the 128-bit operand.
59 operand[0] = lo;
60 operand[1] = hi;
61 return operand;
62 }
63
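// Note: 'adr' is only guaranteed 8-byte aligned, so the aligned operand may start
// one jlong before it; callers therefore reserve an extra 128 bits of slack, as
// fp_signmask_pool below does.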
64 // Buffer for 128-bit masks used by SSE instructions.
65 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
66
67 // Static initialization during VM startup.
68 static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
69 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
70 static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
71 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
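// For example, AbsF/AbsD can be implemented by and-ing an XMM register with the
// sign-mask constants above (clearing the sign bit), and NegF/NegD by xor-ing it
// with the sign-flip constants (toggling the sign bit).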
72
73
74 NEEDS_CLEANUP // remove these definitions?
75 const Register IC_Klass = rax; // where the IC klass is cached
76 const Register SYNC_header = rax; // synchronization header
77 const Register SHIFT_count = rcx; // where count for shift operations must be
78
79 #define __ _masm->
80
81
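// Pick temporary registers that do not alias 'preserve': if a temp collides with
// it, the caller-supplied 'extra' register is substituted for that temp.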
82 static void select_different_registers(Register preserve,
83 Register extra,
84 Register &tmp1,
85 Register &tmp2) {
86 if (tmp1 == preserve) {
87 assert_different_registers(tmp1, tmp2, extra);
88 tmp1 = extra;
89 } else if (tmp2 == preserve) {
90 assert_different_registers(tmp1, tmp2, extra);
91 tmp2 = extra;
92 }
93 assert_different_registers(preserve, tmp1, tmp2);
94 }
95
96
97
98 static void select_different_registers(Register preserve,
99 Register extra,
100 Register &tmp1,
101 Register &tmp2,
102 Register &tmp3) {
103 if (tmp1 == preserve) {
104 assert_different_registers(tmp1, tmp2, tmp3, extra);
105 tmp1 = extra;
106 } else if (tmp2 == preserve) {
107 assert_different_registers(tmp1, tmp2, tmp3, extra);
108 tmp2 = extra;
109 } else if (tmp3 == preserve) {
110 assert_different_registers(tmp1, tmp2, tmp3, extra);
111 tmp3 = extra;
112 }
113 assert_different_registers(preserve, tmp1, tmp2, tmp3);
114 }
115
116
117
118 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
119 if (opr->is_constant()) {
120 LIR_Const* constant = opr->as_constant_ptr();
121 switch (constant->type()) {
122 case T_INT: {
123 return true;
124 }
125
126 default:
127 return false;
128 }
129 }
130 return false;
131 }
132
133
134 LIR_Opr LIR_Assembler::receiverOpr() {
135 return FrameMap::receiver_opr;
136 }
137
138 LIR_Opr LIR_Assembler::osrBufferPointer() {
139 return FrameMap::as_pointer_opr(receiverOpr()->as_register());
140 }
141
142 //--------------fpu register translations-----------------------
143
144
145 address LIR_Assembler::float_constant(float f) {
146 address const_addr = __ float_constant(f);
147 if (const_addr == NULL) {
148 bailout("const section overflow");
149 return __ code()->consts()->start();
150 } else {
151 return const_addr;
152 }
153 }
154
155
156 address LIR_Assembler::double_constant(double d) {
157 address const_addr = __ double_constant(d);
158 if (const_addr == NULL) {
159 bailout("const section overflow");
160 return __ code()->consts()->start();
161 } else {
162 return const_addr;
163 }
164 }
165
166 #ifndef _LP64
167 void LIR_Assembler::fpop() {
168 __ fpop();
169 }
170
171 void LIR_Assembler::fxch(int i) {
172 __ fxch(i);
173 }
174
175 void LIR_Assembler::fld(int i) {
176 __ fld_s(i);
177 }
178
179 void LIR_Assembler::ffree(int i) {
180 __ ffree(i);
181 }
182 #endif // !_LP64
183
184 void LIR_Assembler::breakpoint() {
185 __ int3();
186 }
187
188 void LIR_Assembler::push(LIR_Opr opr) {
189 if (opr->is_single_cpu()) {
190 __ push_reg(opr->as_register());
191 } else if (opr->is_double_cpu()) {
192 NOT_LP64(__ push_reg(opr->as_register_hi()));
193 __ push_reg(opr->as_register_lo());
194 } else if (opr->is_stack()) {
195 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
196 } else if (opr->is_constant()) {
197 LIR_Const* const_opr = opr->as_constant_ptr();
198 if (const_opr->type() == T_OBJECT) {
199 __ push_oop(const_opr->as_jobject());
200 } else if (const_opr->type() == T_INT) {
201 __ push_jint(const_opr->as_jint());
202 } else {
203 ShouldNotReachHere();
204 }
205
206 } else {
207 ShouldNotReachHere();
208 }
209 }
210
211 void LIR_Assembler::pop(LIR_Opr opr) {
212 if (opr->is_single_cpu()) {
213 __ pop_reg(opr->as_register());
214 } else {
215 ShouldNotReachHere();
216 }
217 }
218
219 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
220 return addr->base()->is_illegal() && addr->index()->is_illegal();
221 }
222
223 //-------------------------------------------
224
225 Address LIR_Assembler::as_Address(LIR_Address* addr) {
226 return as_Address(addr, rscratch1);
227 }
228
229 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
230 if (addr->base()->is_illegal()) {
231 assert(addr->index()->is_illegal(), "must be illegal too");
232 AddressLiteral laddr((address)addr->disp(), relocInfo::none);
233 if (! __ reachable(laddr)) {
234 __ movptr(tmp, laddr.addr());
235 Address res(tmp, 0);
236 return res;
237 } else {
238 return __ as_Address(laddr);
239 }
240 }
241
242 Register base = addr->base()->as_pointer_register();
243
244 if (addr->index()->is_illegal()) {
245 return Address( base, addr->disp());
246 } else if (addr->index()->is_cpu_register()) {
247 Register index = addr->index()->as_pointer_register();
248 return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
249 } else if (addr->index()->is_constant()) {
250 intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
251 assert(Assembler::is_simm32(addr_offset), "must be");
252
253 return Address(base, addr_offset);
254 } else {
255 Unimplemented();
256 return Address();
257 }
258 }
259
260
261 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
262 Address base = as_Address(addr);
263 return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
264 }
265
266
267 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
268 return as_Address(addr);
269 }
270
271
272 void LIR_Assembler::osr_entry() {
273 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
274 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
275 ValueStack* entry_state = osr_entry->state();
276 int number_of_locks = entry_state->locks_size();
277
278 // we jump here if osr happens with the interpreter
279 // state set up to continue at the beginning of the
280 // loop that triggered osr - in particular, we have
281 // the following register setup:
282 //
283 // rcx: osr buffer
284 //
285
286 // build frame
287 ciMethod* m = compilation()->method();
288 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
289
290 // OSR buffer is
291 //
292 // locals[nlocals-1..0]
293 // monitors[0..number_of_locks]
294 //
295 // locals is a direct copy of the interpreter frame, so in the osr buffer
296 // the first slot of the locals array is the last local from the interpreter
297 // and the last slot is local[0] (the receiver) from the interpreter
298 //
299 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
300 // from the interpreter frame, and the nth lock slot in the osr buffer is the
301 // 0th lock in the interpreter frame (the method lock if a synchronized method)
302
303 // Initialize monitors in the compiled activation.
304 // rcx: pointer to osr buffer
305 //
306 // All other registers are dead at this point and the locals will be
307 // copied into place by code emitted in the IR.
308
309 Register OSR_buf = osrBufferPointer()->as_pointer_register();
310 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
311 int monitor_offset = BytesPerWord * method()->max_locals() +
312 (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
313 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
314 // the OSR buffer using 2 word entries: first the lock and then
315 // the oop.
316 for (int i = 0; i < number_of_locks; i++) {
317 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
318 #ifdef ASSERT
319 // verify the interpreter's monitor has a non-null object
320 {
321 Label L;
322 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
323 __ jcc(Assembler::notZero, L);
324 __ stop("locked object is NULL");
325 __ bind(L);
326 }
327 #endif
328 __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
329 __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
330 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
331 __ movptr(frame_map()->address_for_monitor_object(i), rbx);
332 }
333 }
334 }
335
336
337 // inline cache check; done before the frame is built.
338 int LIR_Assembler::check_icache() {
339 Register receiver = FrameMap::receiver_opr->as_register();
340 Register ic_klass = IC_Klass;
341 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
342 const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
343 if (!do_post_padding) {
344 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
345 __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
346 }
347 int offset = __ offset();
348 __ inline_cache_check(receiver, IC_Klass);
349 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
350 if (do_post_padding) {
351 // force alignment after the cache check.
352 // It's been verified to be aligned if !VerifyOops
353 __ align(CodeEntryAlignment);
354 }
355 return offset;
356 }
357
358 void LIR_Assembler::clinit_barrier(ciMethod* method) {
359 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
360 assert(!method->holder()->is_not_initialized(), "initialization should have been started");
361
362 Label L_skip_barrier;
363 Register klass = rscratch1;
364 Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
365 assert(thread != noreg, "x86_32 not implemented");
366
367 __ mov_metadata(klass, method->holder()->constant_encoding());
368 __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);
369
370 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
371
372 __ bind(L_skip_barrier);
373 }
374
375 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
376 jobject o = NULL;
377 PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
378 __ movoop(reg, o);
379 patching_epilog(patch, lir_patch_normal, reg, info);
380 }
381
382 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
383 Metadata* o = NULL;
384 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
385 __ mov_metadata(reg, o);
386 patching_epilog(patch, lir_patch_normal, reg, info);
387 }
388
389 // This specifies the rsp decrement needed to build the frame
390 int LIR_Assembler::initial_frame_size_in_bytes() const {
391 // if rounding, must let FrameMap know!
392
393 // The frame_map records the size in slots (32-bit words)
394
395 // subtract two words to account for return address and link
396 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
397 }
398
399
400 int LIR_Assembler::emit_exception_handler() {
401 // if the last instruction is a call (typically to do a throw which
402 // is coming at the end after block reordering) the return address
403 // must still point into the code area in order to avoid assertion
404 // failures when searching for the corresponding bci => add a nop
405 // (was bug 5/14/1999 - gri)
406 __ nop();
407
408 // generate code for exception handler
409 address handler_base = __ start_a_stub(exception_handler_size());
410 if (handler_base == NULL) {
411 // not enough space left for the handler
412 bailout("exception handler overflow");
413 return -1;
414 }
415
416 int offset = code_offset();
417
418 // the exception oop and pc are in rax and rdx
419 // no other registers need to be preserved, so invalidate them
420 __ invalidate_registers(false, true, true, false, true, true);
421
422 // check that there is really an exception
423 __ verify_not_null_oop(rax);
424
425 // search an exception handler (rax: exception oop, rdx: throwing pc)
426 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
427 __ should_not_reach_here();
428 guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
429 __ end_a_stub();
430
431 return offset;
432 }
433
434
435 // Emit the code to remove the frame from the stack in the exception
436 // unwind path.
437 int LIR_Assembler::emit_unwind_handler() {
438 #ifndef PRODUCT
439 if (CommentedAssembly) {
440 _masm->block_comment("Unwind handler");
441 }
442 #endif
443
444 int offset = code_offset();
445
446 // Fetch the exception from TLS and clear out exception related thread state
447 Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
448 NOT_LP64(__ get_thread(rsi));
449 __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
450 __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
451 __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
452
453 __ bind(_unwind_handler_entry);
454 __ verify_not_null_oop(rax);
455 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
456 __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
457 }
458
459 // Perform needed unlocking
460 MonitorExitStub* stub = NULL;
461 if (method()->is_synchronized()) {
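// unlock the method's monitor (slot 0); the slow path is handled by the
// MonitorExitStub whose code is emitted at the end of this handler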
462 monitor_address(0, FrameMap::rax_opr);
463 stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
464 __ unlock_object(rdi, rsi, rax, *stub->entry());
465 __ bind(*stub->continuation());
466 }
467
468 if (compilation()->env()->dtrace_method_probes()) {
469 #ifdef _LP64
470 __ mov(rdi, r15_thread);
471 __ mov_metadata(rsi, method()->constant_encoding());
472 #else
473 __ get_thread(rax);
474 __ movptr(Address(rsp, 0), rax);
475 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
476 #endif
477 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
478 }
479
480 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
481 __ mov(rax, rbx); // Restore the exception
482 }
483
484 // remove the activation and dispatch to the unwind handler
485 __ remove_frame(initial_frame_size_in_bytes());
486 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
487
488 // Emit the slow path assembly
489 if (stub != NULL) {
490 stub->emit_code(this);
491 }
492
493 return offset;
494 }
495
496
497 int LIR_Assembler::emit_deopt_handler() {
498 // if the last instruction is a call (typically to do a throw which
499 // is coming at the end after block reordering) the return address
500 // must still point into the code area in order to avoid assertion
501 // failures when searching for the corresponding bci => add a nop
502 // (was bug 5/14/1999 - gri)
503 __ nop();
504
505 // generate code for deopt handler
506 address handler_base = __ start_a_stub(deopt_handler_size());
507 if (handler_base == NULL) {
508 // not enough space left for the handler
509 bailout("deopt handler overflow");
510 return -1;
511 }
512
513 int offset = code_offset();
514 InternalAddress here(__ pc());
515
516 __ pushptr(here.addr());
517 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
518 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
519 __ end_a_stub();
520
521 return offset;
522 }
523
524 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
525 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
526 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
527 assert(result->fpu() == 0, "result must already be on TOS");
528 }
529
530 // Pop the stack before the safepoint code
531 __ remove_frame(initial_frame_size_in_bytes());
532
533 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
534 __ reserved_stack_check();
535 }
536
537 // Note: we do not need to round double result; float result has the right precision
538 // the poll sets the condition code, but no data registers
539
540 #ifdef _LP64
541 const Register thread = r15_thread;
542 #else
543 const Register thread = rbx;
544 __ get_thread(thread);
545 #endif
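// Record the code offset of the return poll so the slow-path stub
// (C1SafepointPollStub) can later reconstruct the pc of the poll instruction.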
546 code_stub->set_safepoint_offset(__ offset());
547 __ relocate(relocInfo::poll_return_type);
548 __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
549 __ ret(0);
550 }
551
552
553 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
554 guarantee(info != NULL, "Shouldn't be NULL");
555 int offset = __ offset();
556 #ifdef _LP64
557 const Register poll_addr = rscratch1;
558 __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
559 #else
560 assert(tmp->is_cpu_register(), "needed");
561 const Register poll_addr = tmp->as_register();
562 __ get_thread(poll_addr);
563 __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
564 #endif
565 add_debug_info_for_branch(info);
566 __ relocate(relocInfo::poll_type);
567 address pre_pc = __ pc();
568 __ testl(rax, Address(poll_addr, 0));
569 address post_pc = __ pc();
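// The poll test is expected to have a fixed length: 2 bytes on x86_32, plus one
// extra byte on x86_64 for the REX prefix needed to address r10 (rscratch1).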
570 guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
571 return offset;
572 }
573
574
575 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
576 if (from_reg != to_reg) __ mov(to_reg, from_reg);
577 }
578
579 void LIR_Assembler::swap_reg(Register a, Register b) {
580 __ xchgptr(a, b);
581 }
582
583
584 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
585 assert(src->is_constant(), "should not call otherwise");
586 assert(dest->is_register(), "should not call otherwise");
587 LIR_Const* c = src->as_constant_ptr();
588
589 switch (c->type()) {
590 case T_INT: {
591 assert(patch_code == lir_patch_none, "no patching handled here");
592 __ movl(dest->as_register(), c->as_jint());
593 break;
594 }
595
596 case T_ADDRESS: {
597 assert(patch_code == lir_patch_none, "no patching handled here");
598 __ movptr(dest->as_register(), c->as_jint());
599 break;
600 }
601
602 case T_LONG: {
603 assert(patch_code == lir_patch_none, "no patching handled here");
604 #ifdef _LP64
605 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
606 #else
607 __ movptr(dest->as_register_lo(), c->as_jint_lo());
608 __ movptr(dest->as_register_hi(), c->as_jint_hi());
609 #endif // _LP64
610 break;
611 }
612
613 case T_OBJECT: {
614 if (patch_code != lir_patch_none) {
615 jobject2reg_with_patching(dest->as_register(), info);
616 } else {
617 __ movoop(dest->as_register(), c->as_jobject());
618 }
619 break;
620 }
621
622 case T_METADATA: {
623 if (patch_code != lir_patch_none) {
624 klass2reg_with_patching(dest->as_register(), info);
625 } else {
626 __ mov_metadata(dest->as_register(), c->as_metadata());
627 }
628 break;
629 }
630
631 case T_FLOAT: {
632 if (dest->is_single_xmm()) {
633 if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
634 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
635 } else {
636 __ movflt(dest->as_xmm_float_reg(),
637 InternalAddress(float_constant(c->as_jfloat())));
638 }
639 } else {
640 #ifndef _LP64
641 assert(dest->is_single_fpu(), "must be");
642 assert(dest->fpu_regnr() == 0, "dest must be TOS");
643 if (c->is_zero_float()) {
644 __ fldz();
645 } else if (c->is_one_float()) {
646 __ fld1();
647 } else {
648 __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
649 }
650 #else
651 ShouldNotReachHere();
652 #endif // !_LP64
653 }
654 break;
655 }
656
657 case T_DOUBLE: {
658 if (dest->is_double_xmm()) {
659 if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
660 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
661 } else {
662 __ movdbl(dest->as_xmm_double_reg(),
663 InternalAddress(double_constant(c->as_jdouble())));
664 }
665 } else {
666 #ifndef _LP64
667 assert(dest->is_double_fpu(), "must be");
668 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
669 if (c->is_zero_double()) {
670 __ fldz();
671 } else if (c->is_one_double()) {
672 __ fld1();
673 } else {
674 __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
675 }
676 #else
677 ShouldNotReachHere();
678 #endif // !_LP64
679 }
680 break;
681 }
682
683 default:
684 ShouldNotReachHere();
685 }
686 }
687
688 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
689 assert(src->is_constant(), "should not call otherwise");
690 assert(dest->is_stack(), "should not call otherwise");
691 LIR_Const* c = src->as_constant_ptr();
692
693 switch (c->type()) {
694 case T_INT: // fall through
695 case T_FLOAT:
696 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
697 break;
698
699 case T_ADDRESS:
700 __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
701 break;
702
703 case T_OBJECT:
704 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
705 break;
706
707 case T_LONG: // fall through
708 case T_DOUBLE:
709 #ifdef _LP64
710 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
711 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
712 #else
713 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
714 lo_word_offset_in_bytes), c->as_jint_lo_bits());
715 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
716 hi_word_offset_in_bytes), c->as_jint_hi_bits());
717 #endif // _LP64
718 break;
719
720 default:
721 ShouldNotReachHere();
722 }
723 }
724
725 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
726 assert(src->is_constant(), "should not call otherwise");
727 assert(dest->is_address(), "should not call otherwise");
728 LIR_Const* c = src->as_constant_ptr();
729 LIR_Address* addr = dest->as_address_ptr();
730
731 int null_check_here = code_offset();
732 switch (type) {
733 case T_INT: // fall through
734 case T_FLOAT:
735 __ movl(as_Address(addr), c->as_jint_bits());
736 break;
737
738 case T_ADDRESS:
739 __ movptr(as_Address(addr), c->as_jint_bits());
740 break;
741
742 case T_OBJECT: // fall through
743 case T_ARRAY:
744 if (c->as_jobject() == NULL) {
745 if (UseCompressedOops && !wide) {
746 __ movl(as_Address(addr), (int32_t)NULL_WORD);
747 } else {
748 #ifdef _LP64
749 __ xorptr(rscratch1, rscratch1);
750 null_check_here = code_offset();
751 __ movptr(as_Address(addr), rscratch1);
752 #else
753 __ movptr(as_Address(addr), NULL_WORD);
754 #endif
755 }
756 } else {
757 if (is_literal_address(addr)) {
758 ShouldNotReachHere();
759 __ movoop(as_Address(addr, noreg), c->as_jobject());
760 } else {
761 #ifdef _LP64
762 __ movoop(rscratch1, c->as_jobject());
763 if (UseCompressedOops && !wide) {
764 __ encode_heap_oop(rscratch1);
765 null_check_here = code_offset();
766 __ movl(as_Address_lo(addr), rscratch1);
767 } else {
768 null_check_here = code_offset();
769 __ movptr(as_Address_lo(addr), rscratch1);
770 }
771 #else
772 __ movoop(as_Address(addr), c->as_jobject());
773 #endif
774 }
775 }
776 break;
777
778 case T_LONG: // fall through
779 case T_DOUBLE:
780 #ifdef _LP64
781 if (is_literal_address(addr)) {
782 ShouldNotReachHere();
783 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
784 } else {
785 __ movptr(r10, (intptr_t)c->as_jlong_bits());
786 null_check_here = code_offset();
787 __ movptr(as_Address_lo(addr), r10);
788 }
789 #else
790 // Always reachable in 32-bit mode, so this doesn't produce a useless move of the literal
791 __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
792 __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
793 #endif // _LP64
794 break;
795
796 case T_BOOLEAN: // fall through
797 case T_BYTE:
798 __ movb(as_Address(addr), c->as_jint() & 0xFF);
799 break;
800
801 case T_CHAR: // fall through
802 case T_SHORT:
803 __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
804 break;
805
806 default:
807 ShouldNotReachHere();
808 };
809
810 if (info != NULL) {
811 add_debug_info_for_null_check(null_check_here, info);
812 }
813 }
814
815
816 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
817 assert(src->is_register(), "should not call otherwise");
818 assert(dest->is_register(), "should not call otherwise");
819
820 // move between cpu-registers
821 if (dest->is_single_cpu()) {
822 #ifdef _LP64
823 if (src->type() == T_LONG) {
824 // Can do LONG -> OBJECT
825 move_regs(src->as_register_lo(), dest->as_register());
826 return;
827 }
828 #endif
829 assert(src->is_single_cpu(), "must match");
830 if (src->type() == T_OBJECT) {
831 __ verify_oop(src->as_register());
832 }
833 move_regs(src->as_register(), dest->as_register());
834
835 } else if (dest->is_double_cpu()) {
836 #ifdef _LP64
837 if (is_reference_type(src->type())) {
838 // Surprisingly, we can see a move of a long to T_OBJECT here
839 __ verify_oop(src->as_register());
840 move_regs(src->as_register(), dest->as_register_lo());
841 return;
842 }
843 #endif
844 assert(src->is_double_cpu(), "must match");
845 Register f_lo = src->as_register_lo();
846 Register f_hi = src->as_register_hi();
847 Register t_lo = dest->as_register_lo();
848 Register t_hi = dest->as_register_hi();
849 #ifdef _LP64
850 assert(f_hi == f_lo, "must be same");
851 assert(t_hi == t_lo, "must be same");
852 move_regs(f_lo, t_lo);
853 #else
854 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
855
856
857 if (f_lo == t_hi && f_hi == t_lo) {
858 swap_reg(f_lo, f_hi);
859 } else if (f_hi == t_lo) {
860 assert(f_lo != t_hi, "overwriting register");
861 move_regs(f_hi, t_hi);
862 move_regs(f_lo, t_lo);
863 } else {
864 assert(f_hi != t_lo, "overwriting register");
865 move_regs(f_lo, t_lo);
866 move_regs(f_hi, t_hi);
867 }
868 #endif // LP64
869
870 #ifndef _LP64
871 // special moves from fpu-register to xmm-register
872 // necessary for method results
873 } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
874 __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
875 __ fld_s(Address(rsp, 0));
876 } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
877 __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
878 __ fld_d(Address(rsp, 0));
879 } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
880 __ fstp_s(Address(rsp, 0));
881 __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
882 } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
883 __ fstp_d(Address(rsp, 0));
884 __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
885 #endif // !_LP64
886
887 // move between xmm-registers
888 } else if (dest->is_single_xmm()) {
889 assert(src->is_single_xmm(), "must match");
890 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
891 } else if (dest->is_double_xmm()) {
892 assert(src->is_double_xmm(), "must match");
893 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
894
895 #ifndef _LP64
896 // move between fpu-registers (no instruction necessary because of fpu-stack)
897 } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
898 assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
899 assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
900 #endif // !_LP64
901
902 } else {
903 ShouldNotReachHere();
904 }
905 }
906
907 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
908 assert(src->is_register(), "should not call otherwise");
909 assert(dest->is_stack(), "should not call otherwise");
910
911 if (src->is_single_cpu()) {
912 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
913 if (is_reference_type(type)) {
914 __ verify_oop(src->as_register());
915 __ movptr (dst, src->as_register());
916 } else if (type == T_METADATA || type == T_ADDRESS) {
917 __ movptr (dst, src->as_register());
918 } else {
919 __ movl (dst, src->as_register());
920 }
921
922 } else if (src->is_double_cpu()) {
923 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
924 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
925 __ movptr (dstLO, src->as_register_lo());
926 NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
927
928 } else if (src->is_single_xmm()) {
929 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
930 __ movflt(dst_addr, src->as_xmm_float_reg());
931
932 } else if (src->is_double_xmm()) {
933 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
934 __ movdbl(dst_addr, src->as_xmm_double_reg());
935
936 #ifndef _LP64
937 } else if (src->is_single_fpu()) {
938 assert(src->fpu_regnr() == 0, "argument must be on TOS");
939 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
940 if (pop_fpu_stack) __ fstp_s (dst_addr);
941 else __ fst_s (dst_addr);
942
943 } else if (src->is_double_fpu()) {
944 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
945 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
946 if (pop_fpu_stack) __ fstp_d (dst_addr);
947 else __ fst_d (dst_addr);
948 #endif // !_LP64
949
950 } else {
951 ShouldNotReachHere();
952 }
953 }
954
955
956 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
957 LIR_Address* to_addr = dest->as_address_ptr();
958 PatchingStub* patch = NULL;
959 Register compressed_src = rscratch1;
960
961 if (is_reference_type(type)) {
962 __ verify_oop(src->as_register());
963 #ifdef _LP64
964 if (UseCompressedOops && !wide) {
965 __ movptr(compressed_src, src->as_register());
966 __ encode_heap_oop(compressed_src);
967 if (patch_code != lir_patch_none) {
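// tell the oop map of the patching safepoint that the scratch register
// holds a narrow oop rather than a full-width one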
968 info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
969 }
970 }
971 #endif
972 }
973
974 if (patch_code != lir_patch_none) {
975 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
976 Address toa = as_Address(to_addr);
977 assert(toa.disp() != 0, "must have");
978 }
979
980 int null_check_here = code_offset();
981 switch (type) {
982 case T_FLOAT: {
983 #ifdef _LP64
984 assert(src->is_single_xmm(), "not a float");
985 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
986 #else
987 if (src->is_single_xmm()) {
988 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
989 } else {
990 assert(src->is_single_fpu(), "must be");
991 assert(src->fpu_regnr() == 0, "argument must be on TOS");
992 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
993 else __ fst_s (as_Address(to_addr));
994 }
995 #endif // _LP64
996 break;
997 }
998
999 case T_DOUBLE: {
1000 #ifdef _LP64
1001 assert(src->is_double_xmm(), "not a double");
1002 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1003 #else
1004 if (src->is_double_xmm()) {
1005 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1006 } else {
1007 assert(src->is_double_fpu(), "must be");
1008 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1009 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
1010 else __ fst_d (as_Address(to_addr));
1011 }
1012 #endif // _LP64
1013 break;
1014 }
1015
1016 case T_ARRAY: // fall through
1017 case T_OBJECT: // fall through
1018 if (UseCompressedOops && !wide) {
1019 __ movl(as_Address(to_addr), compressed_src);
1020 } else {
1021 __ movptr(as_Address(to_addr), src->as_register());
1022 }
1023 break;
1024 case T_METADATA:
1025 // We get here to store a method pointer to the stack to pass to
1026 // a dtrace runtime call. This can't work on 64 bit with
1027 // compressed klass ptrs: T_METADATA can be a compressed klass
1028 // ptr or a 64 bit method pointer.
1029 LP64_ONLY(ShouldNotReachHere());
1030 __ movptr(as_Address(to_addr), src->as_register());
1031 break;
1032 case T_ADDRESS:
1033 __ movptr(as_Address(to_addr), src->as_register());
1034 break;
1035 case T_INT:
1036 __ movl(as_Address(to_addr), src->as_register());
1037 break;
1038
1039 case T_LONG: {
1040 Register from_lo = src->as_register_lo();
1041 Register from_hi = src->as_register_hi();
1042 #ifdef _LP64
1043 __ movptr(as_Address_lo(to_addr), from_lo);
1044 #else
1045 Register base = to_addr->base()->as_register();
1046 Register index = noreg;
1047 if (to_addr->index()->is_register()) {
1048 index = to_addr->index()->as_register();
1049 }
1050 if (base == from_lo || index == from_lo) {
1051 assert(base != from_hi, "can't be");
1052 assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1053 __ movl(as_Address_hi(to_addr), from_hi);
1054 if (patch != NULL) {
1055 patching_epilog(patch, lir_patch_high, base, info);
1056 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1057 patch_code = lir_patch_low;
1058 }
1059 __ movl(as_Address_lo(to_addr), from_lo);
1060 } else {
1061 assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1062 __ movl(as_Address_lo(to_addr), from_lo);
1063 if (patch != NULL) {
1064 patching_epilog(patch, lir_patch_low, base, info);
1065 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1066 patch_code = lir_patch_high;
1067 }
1068 __ movl(as_Address_hi(to_addr), from_hi);
1069 }
1070 #endif // _LP64
1071 break;
1072 }
1073
1074 case T_BYTE: // fall through
1075 case T_BOOLEAN: {
1076 Register src_reg = src->as_register();
1077 Address dst_addr = as_Address(to_addr);
1078 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1079 __ movb(dst_addr, src_reg);
1080 break;
1081 }
1082
1083 case T_CHAR: // fall through
1084 case T_SHORT:
1085 __ movw(as_Address(to_addr), src->as_register());
1086 break;
1087
1088 default:
1089 ShouldNotReachHere();
1090 }
1091 if (info != NULL) {
1092 add_debug_info_for_null_check(null_check_here, info);
1093 }
1094
1095 if (patch_code != lir_patch_none) {
1096 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1097 }
1098 }
1099
1100
1101 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1102 assert(src->is_stack(), "should not call otherwise");
1103 assert(dest->is_register(), "should not call otherwise");
1104
1105 if (dest->is_single_cpu()) {
1106 if (is_reference_type(type)) {
1107 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1108 __ verify_oop(dest->as_register());
1109 } else if (type == T_METADATA || type == T_ADDRESS) {
1110 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1111 } else {
1112 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1113 }
1114
1115 } else if (dest->is_double_cpu()) {
1116 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1117 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1118 __ movptr(dest->as_register_lo(), src_addr_LO);
1119 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1120
1121 } else if (dest->is_single_xmm()) {
1122 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1123 __ movflt(dest->as_xmm_float_reg(), src_addr);
1124
1125 } else if (dest->is_double_xmm()) {
1126 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1127 __ movdbl(dest->as_xmm_double_reg(), src_addr);
1128
1129 #ifndef _LP64
1130 } else if (dest->is_single_fpu()) {
1131 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1132 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1133 __ fld_s(src_addr);
1134
1135 } else if (dest->is_double_fpu()) {
1136 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1137 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1138 __ fld_d(src_addr);
1139 #endif // _LP64
1140
1141 } else {
1142 ShouldNotReachHere();
1143 }
1144 }
1145
1146
1147 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1148 if (src->is_single_stack()) {
1149 if (is_reference_type(type)) {
1150 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1151 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1152 } else {
1153 #ifndef _LP64
1154 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1155 __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1156 #else
1157 // no pushl on 64-bit
1158 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1159 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1160 #endif
1161 }
1162
1163 } else if (src->is_double_stack()) {
1164 #ifdef _LP64
1165 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1166 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1167 #else
1168 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1169 // push and pop the part at src + wordSize, adding wordSize for the previous push
1170 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1171 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1172 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1173 #endif // _LP64
1174
1175 } else {
1176 ShouldNotReachHere();
1177 }
1178 }
1179
1180
1181 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1182 assert(src->is_address(), "should not call otherwise");
1183 assert(dest->is_register(), "should not call otherwise");
1184
1185 LIR_Address* addr = src->as_address_ptr();
1186 Address from_addr = as_Address(addr);
1187
1188 if (addr->base()->type() == T_OBJECT) {
1189 __ verify_oop(addr->base()->as_pointer_register());
1190 }
1191
1192 switch (type) {
1193 case T_BOOLEAN: // fall through
1194 case T_BYTE: // fall through
1195 case T_CHAR: // fall through
1196 case T_SHORT:
1197 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1198 // on pre-P6 processors we may get partial register stalls
1199 // so blow away the value of the destination register before loading a
1200 // partial word into it. Do it here so that it precedes
1201 // the potential patch point below.
1202 __ xorptr(dest->as_register(), dest->as_register());
1203 }
1204 break;
1205 default:
1206 break;
1207 }
1208
1209 PatchingStub* patch = NULL;
1210 if (patch_code != lir_patch_none) {
1211 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1212 assert(from_addr.disp() != 0, "must have");
1213 }
1214 if (info != NULL) {
1215 add_debug_info_for_null_check_here(info);
1216 }
1217
1218 switch (type) {
1219 case T_FLOAT: {
1220 if (dest->is_single_xmm()) {
1221 __ movflt(dest->as_xmm_float_reg(), from_addr);
1222 } else {
1223 #ifndef _LP64
1224 assert(dest->is_single_fpu(), "must be");
1225 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1226 __ fld_s(from_addr);
1227 #else
1228 ShouldNotReachHere();
1229 #endif // !LP64
1230 }
1231 break;
1232 }
1233
1234 case T_DOUBLE: {
1235 if (dest->is_double_xmm()) {
1236 __ movdbl(dest->as_xmm_double_reg(), from_addr);
1237 } else {
1238 #ifndef _LP64
1239 assert(dest->is_double_fpu(), "must be");
1240 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1241 __ fld_d(from_addr);
1242 #else
1243 ShouldNotReachHere();
1244 #endif // !LP64
1245 }
1246 break;
1247 }
1248
1249 case T_OBJECT: // fall through
1250 case T_ARRAY: // fall through
1251 if (UseCompressedOops && !wide) {
1252 __ movl(dest->as_register(), from_addr);
1253 } else {
1254 __ movptr(dest->as_register(), from_addr);
1255 }
1256 break;
1257
1258 case T_ADDRESS:
1259 __ movptr(dest->as_register(), from_addr);
1260 break;
1261 case T_INT:
1262 __ movl(dest->as_register(), from_addr);
1263 break;
1264
1265 case T_LONG: {
1266 Register to_lo = dest->as_register_lo();
1267 Register to_hi = dest->as_register_hi();
1268 #ifdef _LP64
1269 __ movptr(to_lo, as_Address_lo(addr));
1270 #else
1271 Register base = addr->base()->as_register();
1272 Register index = noreg;
1273 if (addr->index()->is_register()) {
1274 index = addr->index()->as_register();
1275 }
1276 if ((base == to_lo && index == to_hi) ||
1277 (base == to_hi && index == to_lo)) {
1278 // addresses with 2 registers are only formed as a result of
1279 // array access so this code will never have to deal with
1280 // patches or null checks.
1281 assert(info == NULL && patch == NULL, "must be");
1282 __ lea(to_hi, as_Address(addr));
1283 __ movl(to_lo, Address(to_hi, 0));
1284 __ movl(to_hi, Address(to_hi, BytesPerWord));
1285 } else if (base == to_lo || index == to_lo) {
1286 assert(base != to_hi, "can't be");
1287 assert(index == noreg || (index != base && index != to_hi), "can't handle this");
1288 __ movl(to_hi, as_Address_hi(addr));
1289 if (patch != NULL) {
1290 patching_epilog(patch, lir_patch_high, base, info);
1291 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1292 patch_code = lir_patch_low;
1293 }
1294 __ movl(to_lo, as_Address_lo(addr));
1295 } else {
1296 assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1297 __ movl(to_lo, as_Address_lo(addr));
1298 if (patch != NULL) {
1299 patching_epilog(patch, lir_patch_low, base, info);
1300 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1301 patch_code = lir_patch_high;
1302 }
1303 __ movl(to_hi, as_Address_hi(addr));
1304 }
1305 #endif // _LP64
1306 break;
1307 }
1308
1309 case T_BOOLEAN: // fall through
1310 case T_BYTE: {
1311 Register dest_reg = dest->as_register();
1312 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1313 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1314 __ movsbl(dest_reg, from_addr);
1315 } else {
1316 __ movb(dest_reg, from_addr);
1317 __ shll(dest_reg, 24);
1318 __ sarl(dest_reg, 24);
1319 }
1320 break;
1321 }
1322
1323 case T_CHAR: {
1324 Register dest_reg = dest->as_register();
1325 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1326 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1327 __ movzwl(dest_reg, from_addr);
1328 } else {
1329 __ movw(dest_reg, from_addr);
1330 }
1331 break;
1332 }
1333
1334 case T_SHORT: {
1335 Register dest_reg = dest->as_register();
1336 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1337 __ movswl(dest_reg, from_addr);
1338 } else {
1339 __ movw(dest_reg, from_addr);
1340 __ shll(dest_reg, 16);
1341 __ sarl(dest_reg, 16);
1342 }
1343 break;
1344 }
1345
1346 default:
1347 ShouldNotReachHere();
1348 }
1349
1350 if (patch != NULL) {
1351 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1352 }
1353
1354 if (is_reference_type(type)) {
1355 #ifdef _LP64
1356 if (UseCompressedOops && !wide) {
1357 __ decode_heap_oop(dest->as_register());
1358 }
1359 #endif
1360
1361 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1362 if (!UseZGC) {
1363 __ verify_oop(dest->as_register());
1364 }
1365 }
1366 }
1367
1368
1369 NEEDS_CLEANUP; // This could be static?
1370 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1371 int elem_size = type2aelembytes(type);
1372 switch (elem_size) {
1373 case 1: return Address::times_1;
1374 case 2: return Address::times_2;
1375 case 4: return Address::times_4;
1376 case 8: return Address::times_8;
1377 }
1378 ShouldNotReachHere();
1379 return Address::no_scale;
1380 }
1381
1382
1383 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1384 switch (op->code()) {
1385 case lir_idiv:
1386 case lir_irem:
1387 arithmetic_idiv(op->code(),
1388 op->in_opr1(),
1389 op->in_opr2(),
1390 op->in_opr3(),
1391 op->result_opr(),
1392 op->info());
1393 break;
1394 case lir_fmad:
1395 __ fmad(op->result_opr()->as_xmm_double_reg(),
1396 op->in_opr1()->as_xmm_double_reg(),
1397 op->in_opr2()->as_xmm_double_reg(),
1398 op->in_opr3()->as_xmm_double_reg());
1399 break;
1400 case lir_fmaf:
1401 __ fmaf(op->result_opr()->as_xmm_float_reg(),
1402 op->in_opr1()->as_xmm_float_reg(),
1403 op->in_opr2()->as_xmm_float_reg(),
1404 op->in_opr3()->as_xmm_float_reg());
1405 break;
1406 default: ShouldNotReachHere(); break;
1407 }
1408 }
1409
1410 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1411 #ifdef ASSERT
1412 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1413 if (op->block() != NULL) _branch_target_blocks.append(op->block());
1414 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1415 #endif
1416
1417 if (op->cond() == lir_cond_always) {
1418 if (op->info() != NULL) add_debug_info_for_branch(op->info());
1419 __ jmp (*(op->label()));
1420 } else {
1421 Assembler::Condition acond = Assembler::zero;
1422 if (op->code() == lir_cond_float_branch) {
1423 assert(op->ublock() != NULL, "must have unordered successor");
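// the preceding float compare leaves the parity flag set for an unordered
// (NaN) result; send that case to the unordered successor first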
1424 __ jcc(Assembler::parity, *(op->ublock()->label()));
1425 switch(op->cond()) {
1426 case lir_cond_equal: acond = Assembler::equal; break;
1427 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1428 case lir_cond_less: acond = Assembler::below; break;
1429 case lir_cond_lessEqual: acond = Assembler::belowEqual; break;
1430 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1431 case lir_cond_greater: acond = Assembler::above; break;
1432 default: ShouldNotReachHere();
1433 }
1434 } else {
1435 switch (op->cond()) {
1436 case lir_cond_equal: acond = Assembler::equal; break;
1437 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1438 case lir_cond_less: acond = Assembler::less; break;
1439 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1440 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1441 case lir_cond_greater: acond = Assembler::greater; break;
1442 case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
1443 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
1444 default: ShouldNotReachHere();
1445 }
1446 }
1447 __ jcc(acond,*(op->label()));
1448 }
1449 }
1450
1451 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1452 LIR_Opr src = op->in_opr();
1453 LIR_Opr dest = op->result_opr();
1454
1455 switch (op->bytecode()) {
1456 case Bytecodes::_i2l:
1457 #ifdef _LP64
1458 __ movl2ptr(dest->as_register_lo(), src->as_register());
1459 #else
1460 move_regs(src->as_register(), dest->as_register_lo());
1461 move_regs(src->as_register(), dest->as_register_hi());
1462 __ sarl(dest->as_register_hi(), 31);
1463 #endif // LP64
1464 break;
1465
1466 case Bytecodes::_l2i:
1467 #ifdef _LP64
1468 __ movl(dest->as_register(), src->as_register_lo());
1469 #else
1470 move_regs(src->as_register_lo(), dest->as_register());
1471 #endif
1472 break;
1473
1474 case Bytecodes::_i2b:
1475 move_regs(src->as_register(), dest->as_register());
1476 __ sign_extend_byte(dest->as_register());
1477 break;
1478
1479 case Bytecodes::_i2c:
1480 move_regs(src->as_register(), dest->as_register());
1481 __ andl(dest->as_register(), 0xFFFF);
1482 break;
1483
1484 case Bytecodes::_i2s:
1485 move_regs(src->as_register(), dest->as_register());
1486 __ sign_extend_short(dest->as_register());
1487 break;
1488
1489
1490 #ifdef _LP64
1491 case Bytecodes::_f2d:
1492 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1493 break;
1494
1495 case Bytecodes::_d2f:
1496 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1497 break;
1498
1499 case Bytecodes::_i2f:
1500 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1501 break;
1502
1503 case Bytecodes::_i2d:
1504 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1505 break;
1506
1507 case Bytecodes::_l2f:
1508 __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
1509 break;
1510
1511 case Bytecodes::_l2d:
1512 __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
1513 break;
1514
1515 case Bytecodes::_f2i:
1516 __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
1517 break;
1518
1519 case Bytecodes::_d2i:
1520 __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
1521 break;
1522
1523 case Bytecodes::_f2l:
1524 __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
1525 break;
1526
1527 case Bytecodes::_d2l:
1528 __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
1529 break;
1530 #else
1531 case Bytecodes::_f2d:
1532 case Bytecodes::_d2f:
1533 if (dest->is_single_xmm()) {
1534 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1535 } else if (dest->is_double_xmm()) {
1536 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1537 } else {
1538 assert(src->fpu() == dest->fpu(), "register must be equal");
1539 // do nothing (float result is rounded later through spilling)
1540 }
1541 break;
1542
1543 case Bytecodes::_i2f:
1544 case Bytecodes::_i2d:
1545 if (dest->is_single_xmm()) {
1546 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1547 } else if (dest->is_double_xmm()) {
1548 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1549 } else {
1550 assert(dest->fpu() == 0, "result must be on TOS");
1551 __ movl(Address(rsp, 0), src->as_register());
1552 __ fild_s(Address(rsp, 0));
1553 }
1554 break;
1555
1556 case Bytecodes::_l2f:
1557 case Bytecodes::_l2d:
1558 assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
1559 assert(dest->fpu() == 0, "result must be on TOS");
1560 __ movptr(Address(rsp, 0), src->as_register_lo());
1561 __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
1562 __ fild_d(Address(rsp, 0));
1563 // float result is rounded later through spilling
1564 break;
1565
1566 case Bytecodes::_f2i:
1567 case Bytecodes::_d2i:
1568 if (src->is_single_xmm()) {
1569 __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
1570 } else if (src->is_double_xmm()) {
1571 __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
1572 } else {
1573 assert(src->fpu() == 0, "input must be on TOS");
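// temporarily switch the x87 control word to truncating rounding mode for the
// fist store (Java semantics), then restore the standard control word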
1574 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
1575 __ fist_s(Address(rsp, 0));
1576 __ movl(dest->as_register(), Address(rsp, 0));
1577 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1578 }
1579 // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
1580 assert(op->stub() != NULL, "stub required");
1581 __ cmpl(dest->as_register(), 0x80000000);
1582 __ jcc(Assembler::equal, *op->stub()->entry());
1583 __ bind(*op->stub()->continuation());
1584 break;
1585
1586 case Bytecodes::_f2l:
1587 case Bytecodes::_d2l:
1588 assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
1589 assert(src->fpu() == 0, "input must be on TOS");
1590 assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
1591
1592 // instruction sequence too long to inline it here
1593 {
1594 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
1595 }
1596 break;
1597 #endif // _LP64
1598
1599 default: ShouldNotReachHere();
1600 }
1601 }
1602
1603 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1604 if (op->init_check()) {
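// if the klass is not yet fully initialized, allocation must take the slow
// path into the runtime, where initialization is handled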
1605 add_debug_info_for_null_check_here(op->stub()->info());
1606 __ cmpb(Address(op->klass()->as_register(),
1607 InstanceKlass::init_state_offset()),
1608 InstanceKlass::fully_initialized);
1609 __ jcc(Assembler::notEqual, *op->stub()->entry());
1610 }
1611 __ allocate_object(op->obj()->as_register(),
1612 op->tmp1()->as_register(),
1613 op->tmp2()->as_register(),
1614 op->header_size(),
1615 op->object_size(),
1616 op->klass()->as_register(),
1617 *op->stub()->entry());
1618 __ bind(*op->stub()->continuation());
1619 }
1620
1621 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1622 Register len = op->len()->as_register();
1623 LP64_ONLY( __ movslq(len, len); )
1624
1625 if (UseSlowPath ||
1626 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1627 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1628 __ jmp(*op->stub()->entry());
1629 } else {
1630 Register tmp1 = op->tmp1()->as_register();
1631 Register tmp2 = op->tmp2()->as_register();
1632 Register tmp3 = op->tmp3()->as_register();
1633 if (len == tmp1) {
1634 tmp1 = tmp3;
1635 } else if (len == tmp2) {
1636 tmp2 = tmp3;
1637 } else if (len == tmp3) {
1638 // everything is ok
1639 } else {
1640 __ mov(tmp3, len);
1641 }
1642 __ allocate_array(op->obj()->as_register(),
1643 len,
1644 tmp1,
1645 tmp2,
1646 arrayOopDesc::base_offset_in_bytes(op->type()),
1647 array_element_size(op->type()),
1648 op->klass()->as_register(),
1649 *op->stub()->entry());
1650 }
1651 __ bind(*op->stub()->continuation());
1652 }
1653
1654 void LIR_Assembler::type_profile_helper(Register mdo,
1655 ciMethodData *md, ciProfileData *data,
1656 Register recv, Label* update_done) {
1657 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1658 Label next_test;
1659 // See if the receiver is receiver[n].
1660 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1661 __ jccb(Assembler::notEqual, next_test);
1662 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1663 __ addptr(data_addr, DataLayout::counter_increment);
1664 __ jmp(*update_done);
1665 __ bind(next_test);
1666 }
1667
1668 // Didn't find receiver; find next empty slot and fill it in
1669 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1670 Label next_test;
1671 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
1672 __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
1673 __ jccb(Assembler::notEqual, next_test);
1674 __ movptr(recv_addr, recv);
1675 __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
1676 __ jmp(*update_done);
1677 __ bind(next_test);
1678 }
1679 }
1680
1681 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1682 // we always need a stub for the failure case.
1683 CodeStub* stub = op->stub();
1684 Register obj = op->object()->as_register();
1685 Register k_RInfo = op->tmp1()->as_register();
1686 Register klass_RInfo = op->tmp2()->as_register();
1687 Register dst = op->result_opr()->as_register();
1688 ciKlass* k = op->klass();
1689 Register Rtmp1 = noreg;
1690 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1691
1692 // check if it needs to be profiled
1693 ciMethodData* md = NULL;
1694 ciProfileData* data = NULL;
1695
1696 if (op->should_profile()) {
1697 ciMethod* method = op->profiled_method();
1698 assert(method != NULL, "Should have method");
1699 int bci = op->profiled_bci();
1700 md = method->method_data_or_null();
1701 assert(md != NULL, "Sanity");
1702 data = md->bci_to_data(bci);
1703 assert(data != NULL, "need data for type check");
1704 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1705 }
1706 Label profile_cast_success, profile_cast_failure;
1707 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1708 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1709
1710 if (obj == k_RInfo) {
1711 k_RInfo = dst;
1712 } else if (obj == klass_RInfo) {
1713 klass_RInfo = dst;
1714 }
1715 if (k->is_loaded() && !UseCompressedClassPointers) {
1716 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1717 } else {
1718 Rtmp1 = op->tmp3()->as_register();
1719 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1720 }
1721
1722 assert_different_registers(obj, k_RInfo, klass_RInfo);
1723
1724 __ cmpptr(obj, (int32_t)NULL_WORD);
1725 if (op->should_profile()) {
1726 Label not_null;
1727 __ jccb(Assembler::notEqual, not_null);
1728 // Object is null; update MDO and exit
1729 Register mdo = klass_RInfo;
1730 __ mov_metadata(mdo, md->constant_encoding());
1731 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1732 int header_bits = BitData::null_seen_byte_constant();
1733 __ orb(data_addr, header_bits);
1734 __ jmp(*obj_is_null);
1735 __ bind(not_null);
1736 } else {
1737 __ jcc(Assembler::equal, *obj_is_null);
1738 }
1739
1740 if (!k->is_loaded()) {
1741 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1742 } else {
1743 #ifdef _LP64
1744 __ mov_metadata(k_RInfo, k->constant_encoding());
1745 #endif // _LP64
1746 }
1747 __ verify_oop(obj);
1748
1749 if (op->fast_check()) {
1750 // get object class
1751 // not a safepoint as obj null check happens earlier
1752 #ifdef _LP64
1753 if (UseCompressedClassPointers) {
1754 __ load_klass(Rtmp1, obj, tmp_load_klass);
1755 __ cmpptr(k_RInfo, Rtmp1);
1756 } else {
1757 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1758 }
1759 #else
1760 if (k->is_loaded()) {
1761 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1762 } else {
1763 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1764 }
1765 #endif
1766 __ jcc(Assembler::notEqual, *failure_target);
1767 // successful cast, fall through to profile or jump
1768 } else {
1769 // get object class
1770 // not a safepoint as obj null check happens earlier
1771 __ load_klass(klass_RInfo, obj, tmp_load_klass);
1772 if (k->is_loaded()) {
1773 // See if we get an immediate positive hit
1774 #ifdef _LP64
1775 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1776 #else
1777 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1778 #endif // _LP64
1779 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1780 __ jcc(Assembler::notEqual, *failure_target);
1781 // successful cast, fall through to profile or jump
1782 } else {
1783 // See if we get an immediate positive hit
1784 __ jcc(Assembler::equal, *success_target);
1785 // check for self
1786 #ifdef _LP64
1787 __ cmpptr(klass_RInfo, k_RInfo);
1788 #else
1789 __ cmpklass(klass_RInfo, k->constant_encoding());
1790 #endif // _LP64
1791 __ jcc(Assembler::equal, *success_target);
1792
1793 __ push(klass_RInfo);
1794 #ifdef _LP64
1795 __ push(k_RInfo);
1796 #else
1797 __ pushklass(k->constant_encoding());
1798 #endif // _LP64
1799 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1800 __ pop(klass_RInfo);
1801 __ pop(klass_RInfo);
1802 // result is a boolean
1803 __ cmpl(klass_RInfo, 0);
1804 __ jcc(Assembler::equal, *failure_target);
1805 // successful cast, fall through to profile or jump
1806 }
1807 } else {
1808 // perform the fast part of the checking logic
1809 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1810 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1811 __ push(klass_RInfo);
1812 __ push(k_RInfo);
1813 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1814 __ pop(klass_RInfo);
1815 __ pop(k_RInfo);
1816 // result is a boolean
1817 __ cmpl(k_RInfo, 0);
1818 __ jcc(Assembler::equal, *failure_target);
1819 // successful cast, fall through to profile or jump
1820 }
1821 }
1822 if (op->should_profile()) {
1823 Register mdo = klass_RInfo, recv = k_RInfo;
1824 __ bind(profile_cast_success);
1825 __ mov_metadata(mdo, md->constant_encoding());
1826 __ load_klass(recv, obj, tmp_load_klass);
1827 type_profile_helper(mdo, md, data, recv, success);
1828 __ jmp(*success);
1829
1830 __ bind(profile_cast_failure);
1831 __ mov_metadata(mdo, md->constant_encoding());
1832 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1833 __ subptr(counter_addr, DataLayout::counter_increment);
1834 __ jmp(*failure);
1835 }
1836 __ jmp(*success);
1837 }
1838
1839
1840 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1841 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1842 LIR_Code code = op->code();
1843 if (code == lir_store_check) {
1844 Register value = op->object()->as_register();
1845 Register array = op->array()->as_register();
1846 Register k_RInfo = op->tmp1()->as_register();
1847 Register klass_RInfo = op->tmp2()->as_register();
1848 Register Rtmp1 = op->tmp3()->as_register();
1849
1850 CodeStub* stub = op->stub();
1851
1852 // check if it needs to be profiled
1853 ciMethodData* md = NULL;
1854 ciProfileData* data = NULL;
1855
1856 if (op->should_profile()) {
1857 ciMethod* method = op->profiled_method();
1858 assert(method != NULL, "Should have method");
1859 int bci = op->profiled_bci();
1860 md = method->method_data_or_null();
1861 assert(md != NULL, "Sanity");
1862 data = md->bci_to_data(bci);
1863 assert(data != NULL, "need data for type check");
1864 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1865 }
1866 Label profile_cast_success, profile_cast_failure, done;
1867 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1868 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1869
1870 __ cmpptr(value, (int32_t)NULL_WORD);
1871 if (op->should_profile()) {
1872 Label not_null;
1873 __ jccb(Assembler::notEqual, not_null);
1874 // Object is null; update MDO and exit
1875 Register mdo = klass_RInfo;
1876 __ mov_metadata(mdo, md->constant_encoding());
1877 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1878 int header_bits = BitData::null_seen_byte_constant();
1879 __ orb(data_addr, header_bits);
1880 __ jmp(done);
1881 __ bind(not_null);
1882 } else {
1883 __ jcc(Assembler::equal, done);
1884 }
1885
1886 add_debug_info_for_null_check_here(op->info_for_exception());
1887 __ load_klass(k_RInfo, array, tmp_load_klass);
1888 __ load_klass(klass_RInfo, value, tmp_load_klass);
1889
1890 // get instance klass (it's already uncompressed)
1891 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1892 // perform the fast part of the checking logic
1893 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1894 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1895 __ push(klass_RInfo);
1896 __ push(k_RInfo);
1897 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1898 __ pop(klass_RInfo);
1899 __ pop(k_RInfo);
1900 // result is a boolean
1901 __ cmpl(k_RInfo, 0);
1902 __ jcc(Assembler::equal, *failure_target);
1903 // fall through to the success case
1904
1905 if (op->should_profile()) {
1906 Register mdo = klass_RInfo, recv = k_RInfo;
1907 __ bind(profile_cast_success);
1908 __ mov_metadata(mdo, md->constant_encoding());
1909 __ load_klass(recv, value, tmp_load_klass);
1910 type_profile_helper(mdo, md, data, recv, &done);
1911 __ jmpb(done);
1912
1913 __ bind(profile_cast_failure);
1914 __ mov_metadata(mdo, md->constant_encoding());
1915 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1916 __ subptr(counter_addr, DataLayout::counter_increment);
1917 __ jmp(*stub->entry());
1918 }
1919
1920 __ bind(done);
1921 } else
1922 if (code == lir_checkcast) {
1923 Register obj = op->object()->as_register();
1924 Register dst = op->result_opr()->as_register();
1925 Label success;
1926 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1927 __ bind(success);
1928 if (dst != obj) {
1929 __ mov(dst, obj);
1930 }
1931 } else
1932 if (code == lir_instanceof) {
1933 Register obj = op->object()->as_register();
1934 Register dst = op->result_opr()->as_register();
1935 Label success, failure, done;
1936 emit_typecheck_helper(op, &success, &failure, &failure);
1937 __ bind(failure);
1938 __ xorptr(dst, dst);
1939 __ jmpb(done);
1940 __ bind(success);
1941 __ movptr(dst, 1);
1942 __ bind(done);
1943 } else {
1944 ShouldNotReachHere();
1945 }
1946
1947 }
1948
1949
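     // Compare-and-swap. The compare value must be in rax because lock
     // cmpxchg uses it implicitly; on 32-bit, cmpxchg8b expects the compare
     // value in rdx:rax and the new value in rcx:rbx. With compressed oops
     // the new value is encoded into rscratch1 before the 32-bit exchange.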
1950 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1951 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1952 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1953 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1954 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1955 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1956 Register addr = op->addr()->as_register();
1957 __ lock();
1958 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1959
1960 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1961 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1962 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1963 Register newval = op->new_value()->as_register();
1964 Register cmpval = op->cmp_value()->as_register();
1965 assert(cmpval == rax, "wrong register");
1966 assert(newval != NULL, "new val must be register");
1967 assert(cmpval != newval, "cmp and new values must be in different registers");
1968 assert(cmpval != addr, "cmp and addr must be in different registers");
1969 assert(newval != addr, "new value and addr must be in different registers");
1970
1971 if ( op->code() == lir_cas_obj) {
1972 #ifdef _LP64
1973 if (UseCompressedOops) {
1974 __ encode_heap_oop(cmpval);
1975 __ mov(rscratch1, newval);
1976 __ encode_heap_oop(rscratch1);
1977 __ lock();
1978 // cmpval (rax) is implicitly used by this instruction
1979 __ cmpxchgl(rscratch1, Address(addr, 0));
1980 } else
1981 #endif
1982 {
1983 __ lock();
1984 __ cmpxchgptr(newval, Address(addr, 0));
1985 }
1986 } else {
1987 assert(op->code() == lir_cas_int, "lir_cas_int expected");
1988 __ lock();
1989 __ cmpxchgl(newval, Address(addr, 0));
1990 }
1991 #ifdef _LP64
1992 } else if (op->code() == lir_cas_long) {
1993 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1994 Register newval = op->new_value()->as_register_lo();
1995 Register cmpval = op->cmp_value()->as_register_lo();
1996 assert(cmpval == rax, "wrong register");
1997 assert(newval != NULL, "new val must be register");
1998 assert(cmpval != newval, "cmp and new values must be in different registers");
1999 assert(cmpval != addr, "cmp and addr must be in different registers");
2000 assert(newval != addr, "new value and addr must be in different registers");
2001 __ lock();
2002 __ cmpxchgq(newval, Address(addr, 0));
2003 #endif // _LP64
2004 } else {
2005 Unimplemented();
2006 }
2007 }
2008
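     // Conditional move: opr1 is materialized into 'result' first; if cmov is
     // supported and opr2 is not a constant, a cmov with the negated
     // condition overwrites the result with opr2, otherwise a short branch
     // skips the opr2 move.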
2009 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2010 Assembler::Condition acond, ncond;
2011 switch (condition) {
2012 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2013 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2014 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2015 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
2016 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
2017 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
2018 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
2019 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
2020 default: acond = Assembler::equal; ncond = Assembler::notEqual;
2021 ShouldNotReachHere();
2022 }
2023
2024 if (opr1->is_cpu_register()) {
2025 reg2reg(opr1, result);
2026 } else if (opr1->is_stack()) {
2027 stack2reg(opr1, result, result->type());
2028 } else if (opr1->is_constant()) {
2029 const2reg(opr1, result, lir_patch_none, NULL);
2030 } else {
2031 ShouldNotReachHere();
2032 }
2033
2034 if (VM_Version::supports_cmov() && !opr2->is_constant()) {
2035 // optimized version that does not require a branch
2036 if (opr2->is_single_cpu()) {
2037 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
2038 __ cmov(ncond, result->as_register(), opr2->as_register());
2039 } else if (opr2->is_double_cpu()) {
2040 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2041 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2042 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
2043 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
2044 } else if (opr2->is_single_stack()) {
2045 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
2046 } else if (opr2->is_double_stack()) {
2047 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
2048 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
2049 } else {
2050 ShouldNotReachHere();
2051 }
2052
2053 } else {
2054 Label skip;
2055 __ jcc (acond, skip);
2056 if (opr2->is_cpu_register()) {
2057 reg2reg(opr2, result);
2058 } else if (opr2->is_stack()) {
2059 stack2reg(opr2, result, result->type());
2060 } else if (opr2->is_constant()) {
2061 const2reg(opr2, result, lir_patch_none, NULL);
2062 } else {
2063 ShouldNotReachHere();
2064 }
2065 __ bind(skip);
2066 }
2067 }
2068
2069
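     // Two-address arithmetic: for the integer and XMM cases 'left' and
     // 'dest' must be the same operand, which is then combined in place with
     // a register, stack slot or constant on the right. The x87 paths only
     // exist on 32-bit.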
2070 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
2071 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
2072
2073 if (left->is_single_cpu()) {
2074 assert(left == dest, "left and dest must be equal");
2075 Register lreg = left->as_register();
2076
2077 if (right->is_single_cpu()) {
2078 // cpu register - cpu register
2079 Register rreg = right->as_register();
2080 switch (code) {
2081 case lir_add: __ addl (lreg, rreg); break;
2082 case lir_sub: __ subl (lreg, rreg); break;
2083 case lir_mul: __ imull(lreg, rreg); break;
2084 default: ShouldNotReachHere();
2085 }
2086
2087 } else if (right->is_stack()) {
2088 // cpu register - stack
2089 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2090 switch (code) {
2091 case lir_add: __ addl(lreg, raddr); break;
2092 case lir_sub: __ subl(lreg, raddr); break;
2093 default: ShouldNotReachHere();
2094 }
2095
2096 } else if (right->is_constant()) {
2097 // cpu register - constant
2098 jint c = right->as_constant_ptr()->as_jint();
2099 switch (code) {
2100 case lir_add: {
2101 __ incrementl(lreg, c);
2102 break;
2103 }
2104 case lir_sub: {
2105 __ decrementl(lreg, c);
2106 break;
2107 }
2108 default: ShouldNotReachHere();
2109 }
2110
2111 } else {
2112 ShouldNotReachHere();
2113 }
2114
2115 } else if (left->is_double_cpu()) {
2116 assert(left == dest, "left and dest must be equal");
2117 Register lreg_lo = left->as_register_lo();
2118 Register lreg_hi = left->as_register_hi();
2119
2120 if (right->is_double_cpu()) {
2121 // cpu register - cpu register
2122 Register rreg_lo = right->as_register_lo();
2123 Register rreg_hi = right->as_register_hi();
2124 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2125 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2126 switch (code) {
2127 case lir_add:
2128 __ addptr(lreg_lo, rreg_lo);
2129 NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2130 break;
2131 case lir_sub:
2132 __ subptr(lreg_lo, rreg_lo);
2133 NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2134 break;
2135 case lir_mul:
2136 #ifdef _LP64
2137 __ imulq(lreg_lo, rreg_lo);
2138 #else
2139 assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2140 __ imull(lreg_hi, rreg_lo);
2141 __ imull(rreg_hi, lreg_lo);
2142 __ addl (rreg_hi, lreg_hi);
2143 __ mull (rreg_lo);
2144 __ addl (lreg_hi, rreg_hi);
2145 #endif // _LP64
2146 break;
2147 default:
2148 ShouldNotReachHere();
2149 }
2150
2151 } else if (right->is_constant()) {
2152 // cpu register - constant
2153 #ifdef _LP64
2154 jlong c = right->as_constant_ptr()->as_jlong_bits();
2155 __ movptr(r10, (intptr_t) c);
2156 switch (code) {
2157 case lir_add:
2158 __ addptr(lreg_lo, r10);
2159 break;
2160 case lir_sub:
2161 __ subptr(lreg_lo, r10);
2162 break;
2163 default:
2164 ShouldNotReachHere();
2165 }
2166 #else
2167 jint c_lo = right->as_constant_ptr()->as_jint_lo();
2168 jint c_hi = right->as_constant_ptr()->as_jint_hi();
2169 switch (code) {
2170 case lir_add:
2171 __ addptr(lreg_lo, c_lo);
2172 __ adcl(lreg_hi, c_hi);
2173 break;
2174 case lir_sub:
2175 __ subptr(lreg_lo, c_lo);
2176 __ sbbl(lreg_hi, c_hi);
2177 break;
2178 default:
2179 ShouldNotReachHere();
2180 }
2181 #endif // _LP64
2182
2183 } else {
2184 ShouldNotReachHere();
2185 }
2186
2187 } else if (left->is_single_xmm()) {
2188 assert(left == dest, "left and dest must be equal");
2189 XMMRegister lreg = left->as_xmm_float_reg();
2190
2191 if (right->is_single_xmm()) {
2192 XMMRegister rreg = right->as_xmm_float_reg();
2193 switch (code) {
2194 case lir_add: __ addss(lreg, rreg); break;
2195 case lir_sub: __ subss(lreg, rreg); break;
2196 case lir_mul: __ mulss(lreg, rreg); break;
2197 case lir_div: __ divss(lreg, rreg); break;
2198 default: ShouldNotReachHere();
2199 }
2200 } else {
2201 Address raddr;
2202 if (right->is_single_stack()) {
2203 raddr = frame_map()->address_for_slot(right->single_stack_ix());
2204 } else if (right->is_constant()) {
2205 // hack for now
2206 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2207 } else {
2208 ShouldNotReachHere();
2209 }
2210 switch (code) {
2211 case lir_add: __ addss(lreg, raddr); break;
2212 case lir_sub: __ subss(lreg, raddr); break;
2213 case lir_mul: __ mulss(lreg, raddr); break;
2214 case lir_div: __ divss(lreg, raddr); break;
2215 default: ShouldNotReachHere();
2216 }
2217 }
2218
2219 } else if (left->is_double_xmm()) {
2220 assert(left == dest, "left and dest must be equal");
2221
2222 XMMRegister lreg = left->as_xmm_double_reg();
2223 if (right->is_double_xmm()) {
2224 XMMRegister rreg = right->as_xmm_double_reg();
2225 switch (code) {
2226 case lir_add: __ addsd(lreg, rreg); break;
2227 case lir_sub: __ subsd(lreg, rreg); break;
2228 case lir_mul: __ mulsd(lreg, rreg); break;
2229 case lir_div: __ divsd(lreg, rreg); break;
2230 default: ShouldNotReachHere();
2231 }
2232 } else {
2233 Address raddr;
2234 if (right->is_double_stack()) {
2235 raddr = frame_map()->address_for_slot(right->double_stack_ix());
2236 } else if (right->is_constant()) {
2237 // hack for now
2238 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2239 } else {
2240 ShouldNotReachHere();
2241 }
2242 switch (code) {
2243 case lir_add: __ addsd(lreg, raddr); break;
2244 case lir_sub: __ subsd(lreg, raddr); break;
2245 case lir_mul: __ mulsd(lreg, raddr); break;
2246 case lir_div: __ divsd(lreg, raddr); break;
2247 default: ShouldNotReachHere();
2248 }
2249 }
2250
2251 #ifndef _LP64
2252 } else if (left->is_single_fpu()) {
2253 assert(dest->is_single_fpu(), "fpu stack allocation required");
2254
2255 if (right->is_single_fpu()) {
2256 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2257
2258 } else {
2259 assert(left->fpu_regnr() == 0, "left must be on TOS");
2260 assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2261
2262 Address raddr;
2263 if (right->is_single_stack()) {
2264 raddr = frame_map()->address_for_slot(right->single_stack_ix());
2265 } else if (right->is_constant()) {
2266 address const_addr = float_constant(right->as_jfloat());
2267           assert(const_addr != NULL, "incorrect float/double constant maintenance");
2268 // hack for now
2269 raddr = __ as_Address(InternalAddress(const_addr));
2270 } else {
2271 ShouldNotReachHere();
2272 }
2273
2274 switch (code) {
2275 case lir_add: __ fadd_s(raddr); break;
2276 case lir_sub: __ fsub_s(raddr); break;
2277 case lir_mul: __ fmul_s(raddr); break;
2278 case lir_div: __ fdiv_s(raddr); break;
2279 default: ShouldNotReachHere();
2280 }
2281 }
2282
2283 } else if (left->is_double_fpu()) {
2284 assert(dest->is_double_fpu(), "fpu stack allocation required");
2285
2286 if (code == lir_mul || code == lir_div) {
2287 // Double values require special handling for strictfp mul/div on x86
2288 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
2289 __ fmulp(left->fpu_regnrLo() + 1);
2290 }
2291
2292 if (right->is_double_fpu()) {
2293 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2294
2295 } else {
2296 assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2297 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2298
2299 Address raddr;
2300 if (right->is_double_stack()) {
2301 raddr = frame_map()->address_for_slot(right->double_stack_ix());
2302 } else if (right->is_constant()) {
2303 // hack for now
2304 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2305 } else {
2306 ShouldNotReachHere();
2307 }
2308
2309 switch (code) {
2310 case lir_add: __ fadd_d(raddr); break;
2311 case lir_sub: __ fsub_d(raddr); break;
2312 case lir_mul: __ fmul_d(raddr); break;
2313 case lir_div: __ fdiv_d(raddr); break;
2314 default: ShouldNotReachHere();
2315 }
2316 }
2317
2318 if (code == lir_mul || code == lir_div) {
2319 // Double values require special handling for strictfp mul/div on x86
2320 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
2321 __ fmulp(dest->fpu_regnrLo() + 1);
2322 }
2323 #endif // !_LP64
2324
2325 } else if (left->is_single_stack() || left->is_address()) {
2326 assert(left == dest, "left and dest must be equal");
2327
2328 Address laddr;
2329 if (left->is_single_stack()) {
2330 laddr = frame_map()->address_for_slot(left->single_stack_ix());
2331 } else if (left->is_address()) {
2332 laddr = as_Address(left->as_address_ptr());
2333 } else {
2334 ShouldNotReachHere();
2335 }
2336
2337 if (right->is_single_cpu()) {
2338 Register rreg = right->as_register();
2339 switch (code) {
2340 case lir_add: __ addl(laddr, rreg); break;
2341 case lir_sub: __ subl(laddr, rreg); break;
2342 default: ShouldNotReachHere();
2343 }
2344 } else if (right->is_constant()) {
2345 jint c = right->as_constant_ptr()->as_jint();
2346 switch (code) {
2347 case lir_add: {
2348 __ incrementl(laddr, c);
2349 break;
2350 }
2351 case lir_sub: {
2352 __ decrementl(laddr, c);
2353 break;
2354 }
2355 default: ShouldNotReachHere();
2356 }
2357 } else {
2358 ShouldNotReachHere();
2359 }
2360
2361 } else {
2362 ShouldNotReachHere();
2363 }
2364 }
2365
2366 #ifndef _LP64
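     // x87 arithmetic on the FPU register stack. The 'p' variants pop the
     // stack after the operation; which form is chosen depends on whether the
     // result must end up on TOS and whether an input should be popped, as
     // dictated by the FPU stack allocator.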
2367 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2368 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR");
2369 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2370 assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2371
2372 bool left_is_tos = (left_index == 0);
2373 bool dest_is_tos = (dest_index == 0);
2374 int non_tos_index = (left_is_tos ? right_index : left_index);
2375
2376 switch (code) {
2377 case lir_add:
2378 if (pop_fpu_stack) __ faddp(non_tos_index);
2379 else if (dest_is_tos) __ fadd (non_tos_index);
2380 else __ fadda(non_tos_index);
2381 break;
2382
2383 case lir_sub:
2384 if (left_is_tos) {
2385 if (pop_fpu_stack) __ fsubrp(non_tos_index);
2386 else if (dest_is_tos) __ fsub (non_tos_index);
2387 else __ fsubra(non_tos_index);
2388 } else {
2389 if (pop_fpu_stack) __ fsubp (non_tos_index);
2390 else if (dest_is_tos) __ fsubr (non_tos_index);
2391 else __ fsuba (non_tos_index);
2392 }
2393 break;
2394
2395 case lir_mul:
2396 if (pop_fpu_stack) __ fmulp(non_tos_index);
2397 else if (dest_is_tos) __ fmul (non_tos_index);
2398 else __ fmula(non_tos_index);
2399 break;
2400
2401 case lir_div:
2402 if (left_is_tos) {
2403 if (pop_fpu_stack) __ fdivrp(non_tos_index);
2404 else if (dest_is_tos) __ fdiv (non_tos_index);
2405 else __ fdivra(non_tos_index);
2406 } else {
2407 if (pop_fpu_stack) __ fdivp (non_tos_index);
2408 else if (dest_is_tos) __ fdivr (non_tos_index);
2409 else __ fdiva (non_tos_index);
2410 }
2411 break;
2412
2413 case lir_rem:
2414 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2415 __ fremr(noreg);
2416 break;
2417
2418 default:
2419 ShouldNotReachHere();
2420 }
2421 }
2422 #endif // _LP64
2423
2424
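     // Intrinsic math operations. For doubles in XMM registers, abs is done
     // by masking off the sign bit with a 128-bit constant (or vpandn when
     // AVX-512 is available without AVX-512VL) and sqrt maps to sqrtsd; on
     // 32-bit the x87 fabs/fsqrt instructions are used instead.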
2425 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
2426 if (value->is_double_xmm()) {
2427 switch(code) {
2428 case lir_abs :
2429 {
2430 #ifdef _LP64
2431 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
2432 assert(tmp->is_valid(), "need temporary");
2433 __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2);
2434 } else
2435 #endif
2436 {
2437 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2438 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2439 }
2440 assert(!tmp->is_valid(), "do not need temporary");
2441 __ andpd(dest->as_xmm_double_reg(),
2442 ExternalAddress((address)double_signmask_pool));
2443 }
2444 }
2445 break;
2446
2447 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2448 // all other intrinsics are not available in the SSE instruction set, so FPU is used
2449 default : ShouldNotReachHere();
2450 }
2451
2452 #ifndef _LP64
2453 } else if (value->is_double_fpu()) {
2454 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2455 switch(code) {
2456 case lir_abs : __ fabs() ; break;
2457 case lir_sqrt : __ fsqrt(); break;
2458 default : ShouldNotReachHere();
2459 }
2460 #endif // !_LP64
2461 } else {
2462 Unimplemented();
2463 }
2464 }
2465
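     // Bitwise and/or/xor: the left operand is combined in place with a
     // constant, stack slot or register on the right, then the result is
     // moved into the destination register(s) with move_regs.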
2466 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2467 // assert(left->destroys_register(), "check");
2468 if (left->is_single_cpu()) {
2469 Register reg = left->as_register();
2470 if (right->is_constant()) {
2471 int val = right->as_constant_ptr()->as_jint();
2472 switch (code) {
2473 case lir_logic_and: __ andl (reg, val); break;
2474 case lir_logic_or: __ orl (reg, val); break;
2475 case lir_logic_xor: __ xorl (reg, val); break;
2476 default: ShouldNotReachHere();
2477 }
2478 } else if (right->is_stack()) {
2479 // added support for stack operands
2480 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2481 switch (code) {
2482 case lir_logic_and: __ andl (reg, raddr); break;
2483 case lir_logic_or: __ orl (reg, raddr); break;
2484 case lir_logic_xor: __ xorl (reg, raddr); break;
2485 default: ShouldNotReachHere();
2486 }
2487 } else {
2488 Register rright = right->as_register();
2489 switch (code) {
2490 case lir_logic_and: __ andptr (reg, rright); break;
2491 case lir_logic_or : __ orptr (reg, rright); break;
2492 case lir_logic_xor: __ xorptr (reg, rright); break;
2493 default: ShouldNotReachHere();
2494 }
2495 }
2496 move_regs(reg, dst->as_register());
2497 } else {
2498 Register l_lo = left->as_register_lo();
2499 Register l_hi = left->as_register_hi();
2500 if (right->is_constant()) {
2501 #ifdef _LP64
2502 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2503 switch (code) {
2504 case lir_logic_and:
2505 __ andq(l_lo, rscratch1);
2506 break;
2507 case lir_logic_or:
2508 __ orq(l_lo, rscratch1);
2509 break;
2510 case lir_logic_xor:
2511 __ xorq(l_lo, rscratch1);
2512 break;
2513 default: ShouldNotReachHere();
2514 }
2515 #else
2516 int r_lo = right->as_constant_ptr()->as_jint_lo();
2517 int r_hi = right->as_constant_ptr()->as_jint_hi();
2518 switch (code) {
2519 case lir_logic_and:
2520 __ andl(l_lo, r_lo);
2521 __ andl(l_hi, r_hi);
2522 break;
2523 case lir_logic_or:
2524 __ orl(l_lo, r_lo);
2525 __ orl(l_hi, r_hi);
2526 break;
2527 case lir_logic_xor:
2528 __ xorl(l_lo, r_lo);
2529 __ xorl(l_hi, r_hi);
2530 break;
2531 default: ShouldNotReachHere();
2532 }
2533 #endif // _LP64
2534 } else {
2535 #ifdef _LP64
2536 Register r_lo;
2537 if (is_reference_type(right->type())) {
2538 r_lo = right->as_register();
2539 } else {
2540 r_lo = right->as_register_lo();
2541 }
2542 #else
2543 Register r_lo = right->as_register_lo();
2544 Register r_hi = right->as_register_hi();
2545 assert(l_lo != r_hi, "overwriting registers");
2546 #endif
2547 switch (code) {
2548 case lir_logic_and:
2549 __ andptr(l_lo, r_lo);
2550 NOT_LP64(__ andptr(l_hi, r_hi);)
2551 break;
2552 case lir_logic_or:
2553 __ orptr(l_lo, r_lo);
2554 NOT_LP64(__ orptr(l_hi, r_hi);)
2555 break;
2556 case lir_logic_xor:
2557 __ xorptr(l_lo, r_lo);
2558 NOT_LP64(__ xorptr(l_hi, r_hi);)
2559 break;
2560 default: ShouldNotReachHere();
2561 }
2562 }
2563
2564 Register dst_lo = dst->as_register_lo();
2565 Register dst_hi = dst->as_register_hi();
2566
2567 #ifdef _LP64
2568 move_regs(l_lo, dst_lo);
2569 #else
2570 if (dst_lo == l_hi) {
2571 assert(dst_hi != l_lo, "overwriting registers");
2572 move_regs(l_hi, dst_hi);
2573 move_regs(l_lo, dst_lo);
2574 } else {
2575 assert(dst_lo != l_hi, "overwriting registers");
2576 move_regs(l_lo, dst_lo);
2577 move_regs(l_hi, dst_hi);
2578 }
2579 #endif // _LP64
2580 }
2581 }
2582
2583
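     // Integer division/remainder. A power-of-two constant divisor is
     // strength-reduced: for idiv the sign bits spilled into rdx by cdql are
     // used to bias the dividend so the arithmetic shift rounds towards zero;
     // for irem the sign bit is kept in the mask and negative results are
     // fixed up with the decrement/or/increment sequence. A register divisor
     // goes through corrected_idivl, which also handles min_int / -1.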
2584 // we assume that rax and rdx can be overwritten
2585 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2586
2587 assert(left->is_single_cpu(), "left must be register");
2588 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
2589 assert(result->is_single_cpu(), "result must be register");
2590
2591 // assert(left->destroys_register(), "check");
2592 // assert(right->destroys_register(), "check");
2593
2594 Register lreg = left->as_register();
2595 Register dreg = result->as_register();
2596
2597 if (right->is_constant()) {
2598 jint divisor = right->as_constant_ptr()->as_jint();
2599 assert(divisor > 0 && is_power_of_2(divisor), "must be");
2600 if (code == lir_idiv) {
2601 assert(lreg == rax, "must be rax,");
2602 assert(temp->as_register() == rdx, "tmp register must be rdx");
2603 __ cdql(); // sign extend into rdx:rax
2604 if (divisor == 2) {
2605 __ subl(lreg, rdx);
2606 } else {
2607 __ andl(rdx, divisor - 1);
2608 __ addl(lreg, rdx);
2609 }
2610 __ sarl(lreg, log2i_exact(divisor));
2611 move_regs(lreg, dreg);
2612 } else if (code == lir_irem) {
2613 Label done;
2614 __ mov(dreg, lreg);
2615 __ andl(dreg, 0x80000000 | (divisor - 1));
2616 __ jcc(Assembler::positive, done);
2617 __ decrement(dreg);
2618 __ orl(dreg, ~(divisor - 1));
2619 __ increment(dreg);
2620 __ bind(done);
2621 } else {
2622 ShouldNotReachHere();
2623 }
2624 } else {
2625 Register rreg = right->as_register();
2626 assert(lreg == rax, "left register must be rax,");
2627 assert(rreg != rdx, "right register must not be rdx");
2628 assert(temp->as_register() == rdx, "tmp register must be rdx");
2629
2630 move_regs(lreg, rax);
2631
2632 int idivl_offset = __ corrected_idivl(rreg);
2633 if (ImplicitDiv0Checks) {
2634 add_debug_info_for_div0(idivl_offset, info);
2635 }
2636 if (code == lir_irem) {
2637 move_regs(rdx, dreg); // result is in rdx
2638 } else {
2639 move_regs(rax, dreg);
2640 }
2641 }
2642 }
2643
2644
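     // Compare two operands and set the condition codes (the FPU status word
     // for x87 operands). Oop comparisons go through cmpoop; the 32-bit
     // long-long compare is done with sub/sbb and therefore destroys the
     // left operand.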
2645 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2646 if (opr1->is_single_cpu()) {
2647 Register reg1 = opr1->as_register();
2648 if (opr2->is_single_cpu()) {
2649 // cpu register - cpu register
2650 if (is_reference_type(opr1->type())) {
2651 __ cmpoop(reg1, opr2->as_register());
2652 } else {
2653 assert(!is_reference_type(opr2->type()), "cmp int, oop?");
2654 __ cmpl(reg1, opr2->as_register());
2655 }
2656 } else if (opr2->is_stack()) {
2657 // cpu register - stack
2658 if (is_reference_type(opr1->type())) {
2659 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2660 } else {
2661 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2662 }
2663 } else if (opr2->is_constant()) {
2664 // cpu register - constant
2665 LIR_Const* c = opr2->as_constant_ptr();
2666 if (c->type() == T_INT) {
2667 __ cmpl(reg1, c->as_jint());
2668 } else if (c->type() == T_METADATA) {
2669 // All we need for now is a comparison with NULL for equality.
2670 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
2671 Metadata* m = c->as_metadata();
2672 if (m == NULL) {
2673 __ cmpptr(reg1, (int32_t)0);
2674 } else {
2675 ShouldNotReachHere();
2676 }
2677 } else if (is_reference_type(c->type())) {
2678 // In 64bit oops are single register
2679 jobject o = c->as_jobject();
2680 if (o == NULL) {
2681 __ cmpptr(reg1, (int32_t)NULL_WORD);
2682 } else {
2683 __ cmpoop(reg1, o);
2684 }
2685 } else {
2686 fatal("unexpected type: %s", basictype_to_str(c->type()));
2687 }
2688 // cpu register - address
2689 } else if (opr2->is_address()) {
2690 if (op->info() != NULL) {
2691 add_debug_info_for_null_check_here(op->info());
2692 }
2693 __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2694 } else {
2695 ShouldNotReachHere();
2696 }
2697
2698 } else if(opr1->is_double_cpu()) {
2699 Register xlo = opr1->as_register_lo();
2700 Register xhi = opr1->as_register_hi();
2701 if (opr2->is_double_cpu()) {
2702 #ifdef _LP64
2703 __ cmpptr(xlo, opr2->as_register_lo());
2704 #else
2705 // cpu register - cpu register
2706 Register ylo = opr2->as_register_lo();
2707 Register yhi = opr2->as_register_hi();
2708 __ subl(xlo, ylo);
2709 __ sbbl(xhi, yhi);
2710 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2711 __ orl(xhi, xlo);
2712 }
2713 #endif // _LP64
2714 } else if (opr2->is_constant()) {
2715 // cpu register - constant 0
2716 assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2717 #ifdef _LP64
2718 __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2719 #else
2720 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2721 __ orl(xhi, xlo);
2722 #endif // _LP64
2723 } else {
2724 ShouldNotReachHere();
2725 }
2726
2727 } else if (opr1->is_single_xmm()) {
2728 XMMRegister reg1 = opr1->as_xmm_float_reg();
2729 if (opr2->is_single_xmm()) {
2730 // xmm register - xmm register
2731 __ ucomiss(reg1, opr2->as_xmm_float_reg());
2732 } else if (opr2->is_stack()) {
2733 // xmm register - stack
2734 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2735 } else if (opr2->is_constant()) {
2736 // xmm register - constant
2737 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2738 } else if (opr2->is_address()) {
2739 // xmm register - address
2740 if (op->info() != NULL) {
2741 add_debug_info_for_null_check_here(op->info());
2742 }
2743 __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2744 } else {
2745 ShouldNotReachHere();
2746 }
2747
2748 } else if (opr1->is_double_xmm()) {
2749 XMMRegister reg1 = opr1->as_xmm_double_reg();
2750 if (opr2->is_double_xmm()) {
2751 // xmm register - xmm register
2752 __ ucomisd(reg1, opr2->as_xmm_double_reg());
2753 } else if (opr2->is_stack()) {
2754 // xmm register - stack
2755 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2756 } else if (opr2->is_constant()) {
2757 // xmm register - constant
2758 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2759 } else if (opr2->is_address()) {
2760 // xmm register - address
2761 if (op->info() != NULL) {
2762 add_debug_info_for_null_check_here(op->info());
2763 }
2764 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2765 } else {
2766 ShouldNotReachHere();
2767 }
2768
2769 #ifndef _LP64
2770 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2771 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2772 assert(opr2->is_fpu_register(), "both must be registers");
2773 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2774 #endif // LP64
2775
2776 } else if (opr1->is_address() && opr2->is_constant()) {
2777 LIR_Const* c = opr2->as_constant_ptr();
2778 #ifdef _LP64
2779 if (is_reference_type(c->type())) {
2780 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2781 __ movoop(rscratch1, c->as_jobject());
2782 }
2783 #endif // LP64
2784 if (op->info() != NULL) {
2785 add_debug_info_for_null_check_here(op->info());
2786 }
2787 // special case: address - constant
2788 LIR_Address* addr = opr1->as_address_ptr();
2789 if (c->type() == T_INT) {
2790 __ cmpl(as_Address(addr), c->as_jint());
2791 } else if (is_reference_type(c->type())) {
2792 #ifdef _LP64
2793 // %%% Make this explode if addr isn't reachable until we figure out a
2794 // better strategy by giving noreg as the temp for as_Address
2795 __ cmpoop(rscratch1, as_Address(addr, noreg));
2796 #else
2797 __ cmpoop(as_Address(addr), c->as_jobject());
2798 #endif // _LP64
2799 } else {
2800 ShouldNotReachHere();
2801 }
2802
2803 } else {
2804 ShouldNotReachHere();
2805 }
2806 }
2807
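     // Materialize the result of a float/double/long comparison as -1, 0 or 1
     // in a register, as required by the fcmpl/fcmpg/lcmp bytecodes.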
2808 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2809 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2810 if (left->is_single_xmm()) {
2811 assert(right->is_single_xmm(), "must match");
2812 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2813 } else if (left->is_double_xmm()) {
2814 assert(right->is_double_xmm(), "must match");
2815 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2816
2817 } else {
2818 #ifdef _LP64
2819 ShouldNotReachHere();
2820 #else
2821 assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2822 assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2823
2824 assert(left->fpu() == 0, "left must be on TOS");
2825 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2826 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2827 #endif // LP64
2828 }
2829 } else {
2830 assert(code == lir_cmp_l2i, "check");
2831 #ifdef _LP64
2832 Label done;
2833 Register dest = dst->as_register();
2834 __ cmpptr(left->as_register_lo(), right->as_register_lo());
2835 __ movl(dest, -1);
2836 __ jccb(Assembler::less, done);
2837 __ set_byte_if_not_zero(dest);
2838 __ movzbl(dest, dest);
2839 __ bind(done);
2840 #else
2841 __ lcmp2int(left->as_register_hi(),
2842 left->as_register_lo(),
2843 right->as_register_hi(),
2844 right->as_register_lo());
2845 move_regs(left->as_register_hi(), dst->as_register());
2846 #endif // _LP64
2847 }
2848 }
2849
2850
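     // Pad with nops so that the 32-bit displacement of the upcoming call
     // does not straddle a word boundary; this keeps call-site patching
     // atomic.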
2851 void LIR_Assembler::align_call(LIR_Code code) {
2852 // make sure that the displacement word of the call ends up word aligned
2853 int offset = __ offset();
2854 switch (code) {
2855 case lir_static_call:
2856 case lir_optvirtual_call:
2857 case lir_dynamic_call:
2858 offset += NativeCall::displacement_offset;
2859 break;
2860 case lir_icvirtual_call:
2861 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2862 break;
2863 default: ShouldNotReachHere();
2864 }
2865 __ align(BytesPerWord, offset);
2866 }
2867
2868
2869 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2870 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2871 "must be aligned");
2872 __ call(AddressLiteral(op->addr(), rtype));
2873 add_call_info(code_offset(), op->info());
2874 }
2875
2876
2877 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2878 __ ic_call(op->addr());
2879 add_call_info(code_offset(), op->info());
2880 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2881 "must be aligned");
2882 }
2883
2884
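     // Emit the out-of-line stub for a static or optimized-virtual call site:
     // a mov of the (still unresolved) Method* metadata into rbx followed by
     // a jump whose target is patched once the call is resolved.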
2885 void LIR_Assembler::emit_static_call_stub() {
2886 address call_pc = __ pc();
2887 address stub = __ start_a_stub(call_stub_size());
2888 if (stub == NULL) {
2889 bailout("static call stub overflow");
2890 return;
2891 }
2892
2893 int start = __ offset();
2894
2895 // make sure that the displacement word of the call ends up word aligned
2896 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2897 __ relocate(static_stub_Relocation::spec(call_pc));
2898 __ mov_metadata(rbx, (Metadata*)NULL);
2899 // must be set to -1 at code generation time
2900 assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2901 // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2902 __ jump(RuntimeAddress(__ pc()));
2903
2904 assert(__ offset() - start <= call_stub_size(), "stub too big");
2905 __ end_a_stub();
2906 }
2907
2908
2909 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2910 assert(exceptionOop->as_register() == rax, "must match");
2911 assert(exceptionPC->as_register() == rdx, "must match");
2912
2913 // exception object is not added to oop map by LinearScan
2914 // (LinearScan assumes that no oops are in fixed registers)
2915 info->add_register_oop(exceptionOop);
2916 Runtime1::StubID unwind_id;
2917
2918 // get current pc information
2919 // pc is only needed if the method has an exception handler, the unwind code does not need it.
2920 int pc_for_athrow_offset = __ offset();
2921 InternalAddress pc_for_athrow(__ pc());
2922 __ lea(exceptionPC->as_register(), pc_for_athrow);
2923 add_call_info(pc_for_athrow_offset, info); // for exception handler
2924
2925 __ verify_not_null_oop(rax);
2926 // search an exception handler (rax: exception oop, rdx: throwing pc)
2927 if (compilation()->has_fpu_code()) {
2928 unwind_id = Runtime1::handle_exception_id;
2929 } else {
2930 unwind_id = Runtime1::handle_exception_nofpu_id;
2931 }
2932 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2933
2934 // enough room for two byte trap
2935 __ nop();
2936 }
2937
2938
2939 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2940 assert(exceptionOop->as_register() == rax, "must match");
2941
2942 __ jmp(_unwind_handler_entry);
2943 }
2944
2945
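     // Shifts with a variable count: the count is pinned to rcx by the
     // register allocator because the hardware shift instructions take their
     // count from cl.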
2946 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2947
2948 // optimized version for linear scan:
2949   // * count must already be in ECX (guaranteed by LinearScan)
2950 // * left and dest must be equal
2951 // * tmp must be unused
2952 assert(count->as_register() == SHIFT_count, "count must be in ECX");
2953 assert(left == dest, "left and dest must be equal");
2954 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2955
2956 if (left->is_single_cpu()) {
2957 Register value = left->as_register();
2958 assert(value != SHIFT_count, "left cannot be ECX");
2959
2960 switch (code) {
2961 case lir_shl: __ shll(value); break;
2962 case lir_shr: __ sarl(value); break;
2963 case lir_ushr: __ shrl(value); break;
2964 default: ShouldNotReachHere();
2965 }
2966 } else if (left->is_double_cpu()) {
2967 Register lo = left->as_register_lo();
2968 Register hi = left->as_register_hi();
2969 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2970 #ifdef _LP64
2971 switch (code) {
2972 case lir_shl: __ shlptr(lo); break;
2973 case lir_shr: __ sarptr(lo); break;
2974 case lir_ushr: __ shrptr(lo); break;
2975 default: ShouldNotReachHere();
2976 }
2977 #else
2978
2979 switch (code) {
2980 case lir_shl: __ lshl(hi, lo); break;
2981 case lir_shr: __ lshr(hi, lo, true); break;
2982 case lir_ushr: __ lshr(hi, lo, false); break;
2983 default: ShouldNotReachHere();
2984 }
2985 #endif // LP64
2986 } else {
2987 ShouldNotReachHere();
2988 }
2989 }
2990
2991
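     // Shifts with a constant count: the value is copied into dest first so
     // the left operand is not destroyed, then shifted in place by the
     // masked count.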
2992 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2993 if (dest->is_single_cpu()) {
2994 // first move left into dest so that left is not destroyed by the shift
2995 Register value = dest->as_register();
2996 count = count & 0x1F; // Java spec
2997
2998 move_regs(left->as_register(), value);
2999 switch (code) {
3000 case lir_shl: __ shll(value, count); break;
3001 case lir_shr: __ sarl(value, count); break;
3002 case lir_ushr: __ shrl(value, count); break;
3003 default: ShouldNotReachHere();
3004 }
3005 } else if (dest->is_double_cpu()) {
3006 #ifndef _LP64
3007 Unimplemented();
3008 #else
3009 // first move left into dest so that left is not destroyed by the shift
3010 Register value = dest->as_register_lo();
3011 count = count & 0x1F; // Java spec
3012
3013 move_regs(left->as_register_lo(), value);
3014 switch (code) {
3015 case lir_shl: __ shlptr(value, count); break;
3016 case lir_shr: __ sarptr(value, count); break;
3017 case lir_ushr: __ shrptr(value, count); break;
3018 default: ShouldNotReachHere();
3019 }
3020 #endif // _LP64
3021 } else {
3022 ShouldNotReachHere();
3023 }
3024 }
3025
3026
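     // The store_parameter overloads spill outgoing stub arguments into the
     // reserved argument area at the bottom of the frame (offsets are given
     // in words from rsp).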
3027 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
3028 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3029 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3030 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3031 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
3032 }
3033
3034
3035 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
3036 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3037 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3038 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3039 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3040 }
3041
3042
3043 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3044 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3045 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3046 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3047 __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3048 }
3049
3050
3051 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3052 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3053 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3054 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3055 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3056 }
3057
3058
3059 // This code replaces a call to arraycopy; no exception may
3060 // be thrown in this code, as exceptions must be thrown in the System.arraycopy
3061 // activation frame; we could save some checks if this were not the case
3062 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3063 ciArrayKlass* default_type = op->expected_type();
3064 Register src = op->src()->as_register();
3065 Register dst = op->dst()->as_register();
3066 Register src_pos = op->src_pos()->as_register();
3067 Register dst_pos = op->dst_pos()->as_register();
3068 Register length = op->length()->as_register();
3069 Register tmp = op->tmp()->as_register();
3070 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3071 Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
3072
3073 CodeStub* stub = op->stub();
3074 int flags = op->flags();
3075 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3076 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3077
3078 // if we don't know anything, just go through the generic arraycopy
3079 if (default_type == NULL) {
3080 // save outgoing arguments on stack in case call to System.arraycopy is needed
3081 // HACK ALERT. This code used to push the parameters in a hardwired fashion
3082 // for interpreter calling conventions. Now we have to do it in new style conventions.
3083 // For the moment until C1 gets the new register allocator I just force all the
3084 // args to the right place (except the register args) and then on the back side
3085 // reload the register args properly if we go slow path. Yuck
3086
3087 // These are proper for the calling convention
3088 store_parameter(length, 2);
3089 store_parameter(dst_pos, 1);
3090 store_parameter(dst, 0);
3091
3092 // these are just temporary placements until we need to reload
3093 store_parameter(src_pos, 3);
3094 store_parameter(src, 4);
3095 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3096
3097 address copyfunc_addr = StubRoutines::generic_arraycopy();
3098 assert(copyfunc_addr != NULL, "generic arraycopy stub required");
3099
3100     // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
3101 #ifdef _LP64
3102 // The arguments are in java calling convention so we can trivially shift them to C
3103 // convention
3104 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3105 __ mov(c_rarg0, j_rarg0);
3106 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3107 __ mov(c_rarg1, j_rarg1);
3108 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3109 __ mov(c_rarg2, j_rarg2);
3110 assert_different_registers(c_rarg3, j_rarg4);
3111 __ mov(c_rarg3, j_rarg3);
3112 #ifdef _WIN64
3113 // Allocate abi space for args but be sure to keep stack aligned
3114 __ subptr(rsp, 6*wordSize);
3115 store_parameter(j_rarg4, 4);
3116 #ifndef PRODUCT
3117 if (PrintC1Statistics) {
3118 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3119 }
3120 #endif
3121 __ call(RuntimeAddress(copyfunc_addr));
3122 __ addptr(rsp, 6*wordSize);
3123 #else
3124 __ mov(c_rarg4, j_rarg4);
3125 #ifndef PRODUCT
3126 if (PrintC1Statistics) {
3127 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3128 }
3129 #endif
3130 __ call(RuntimeAddress(copyfunc_addr));
3131 #endif // _WIN64
3132 #else
3133 __ push(length);
3134 __ push(dst_pos);
3135 __ push(dst);
3136 __ push(src_pos);
3137 __ push(src);
3138
3139 #ifndef PRODUCT
3140 if (PrintC1Statistics) {
3141 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3142 }
3143 #endif
3144 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
3145
3146 #endif // _LP64
3147
3148 __ cmpl(rax, 0);
3149 __ jcc(Assembler::equal, *stub->continuation());
3150
3151 __ mov(tmp, rax);
3152 __ xorl(tmp, -1);
3153
3154 // Reload values from the stack so they are where the stub
3155 // expects them.
3156 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3157 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3158 __ movptr (length, Address(rsp, 2*BytesPerWord));
3159 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3160 __ movptr (src, Address(rsp, 4*BytesPerWord));
3161
3162 __ subl(length, tmp);
3163 __ addl(src_pos, tmp);
3164 __ addl(dst_pos, tmp);
3165 __ jmp(*stub->entry());
3166
3167 __ bind(*stub->continuation());
3168 return;
3169 }
3170
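       // From here on the element type is statically known, so the null,
       // index and length checks below are emitted inline and only the
       // failing cases branch to the slow-path stub.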
3171 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3172
3173 int elem_size = type2aelembytes(basic_type);
3174 Address::ScaleFactor scale;
3175
3176 switch (elem_size) {
3177 case 1 :
3178 scale = Address::times_1;
3179 break;
3180 case 2 :
3181 scale = Address::times_2;
3182 break;
3183 case 4 :
3184 scale = Address::times_4;
3185 break;
3186 case 8 :
3187 scale = Address::times_8;
3188 break;
3189 default:
3190 scale = Address::no_scale;
3191 ShouldNotReachHere();
3192 }
3193
3194 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
3195 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
3196 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
3197 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
3198
3199   // length and the positions are all sign-extended at this point on 64-bit
3200
3201 // test for NULL
3202 if (flags & LIR_OpArrayCopy::src_null_check) {
3203 __ testptr(src, src);
3204 __ jcc(Assembler::zero, *stub->entry());
3205 }
3206 if (flags & LIR_OpArrayCopy::dst_null_check) {
3207 __ testptr(dst, dst);
3208 __ jcc(Assembler::zero, *stub->entry());
3209 }
3210
3211 // If the compiler was not able to prove that exact type of the source or the destination
3212 // of the arraycopy is an array type, check at runtime if the source or the destination is
3213 // an instance type.
3214 if (flags & LIR_OpArrayCopy::type_check) {
3215 if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3216 __ load_klass(tmp, dst, tmp_load_klass);
3217 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3218 __ jcc(Assembler::greaterEqual, *stub->entry());
3219 }
3220
3221 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3222 __ load_klass(tmp, src, tmp_load_klass);
3223 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3224 __ jcc(Assembler::greaterEqual, *stub->entry());
3225 }
3226 }
3227
3228 // check if negative
3229 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
3230 __ testl(src_pos, src_pos);
3231 __ jcc(Assembler::less, *stub->entry());
3232 }
3233 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
3234 __ testl(dst_pos, dst_pos);
3235 __ jcc(Assembler::less, *stub->entry());
3236 }
3237
3238 if (flags & LIR_OpArrayCopy::src_range_check) {
3239 __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3240 __ cmpl(tmp, src_length_addr);
3241 __ jcc(Assembler::above, *stub->entry());
3242 }
3243 if (flags & LIR_OpArrayCopy::dst_range_check) {
3244 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3245 __ cmpl(tmp, dst_length_addr);
3246 __ jcc(Assembler::above, *stub->entry());
3247 }
3248
3249 if (flags & LIR_OpArrayCopy::length_positive_check) {
3250 __ testl(length, length);
3251 __ jcc(Assembler::less, *stub->entry());
3252 }
3253
3254 #ifdef _LP64
3255   __ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero
3256   __ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero
3257 #endif
3258
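       // If the compiler could not prove that the src and dst array types are
       // compatible, compare the klasses at runtime. For object arrays a
       // failed fast subtype check falls back to the checkcast arraycopy
       // stub, which copies elements with a per-element store check.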
3259 if (flags & LIR_OpArrayCopy::type_check) {
3260 // We don't know the array types are compatible
3261 if (basic_type != T_OBJECT) {
3262 // Simple test for basic type arrays
3263 __ cmp_klass(src, dst, tmp, tmp2);
3264 __ jcc(Assembler::notEqual, *stub->entry());
3265 } else {
3266 // For object arrays, if src is a sub class of dst then we can
3267 // safely do the copy.
3268 Label cont, slow;
3269
3270 __ push(src);
3271 __ push(dst);
3272
3273 __ load_klass(src, src, tmp_load_klass);
3274 __ load_klass(dst, dst, tmp_load_klass);
3275
3276 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3277
3278 __ push(src);
3279 __ push(dst);
3280 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3281 __ pop(dst);
3282 __ pop(src);
3283
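      // The stub hands its boolean result back through the spilled klass slots; after the
      // two pops, a non-zero src means src[] is a subtype of dst[] and the copy is safe.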
3284 __ cmpl(src, 0);
3285 __ jcc(Assembler::notEqual, cont);
3286
3287 __ bind(slow);
3288 __ pop(dst);
3289 __ pop(src);
3290
3291 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
3292 if (copyfunc_addr != NULL) { // use stub if available
3293         // src is not a subclass of dst, so we have to do a
3294         // per-element check.
3295
3296 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
3297 if ((flags & mask) != mask) {
3298           // One of src/dst is already known to be an object array; check at
3298           // runtime that the other one is an object array too.
3299 assert(flags & mask, "one of the two should be known to be an object array");
3300
3301 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3302 __ load_klass(tmp, src, tmp_load_klass);
3303 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3304 __ load_klass(tmp, dst, tmp_load_klass);
3305 }
3306 int lh_offset = in_bytes(Klass::layout_helper_offset());
3307 Address klass_lh_addr(tmp, lh_offset);
3308 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3309 __ cmpl(klass_lh_addr, objArray_lh);
3310 __ jcc(Assembler::notEqual, *stub->entry());
3311 }
3312
3313 // Spill because stubs can use any register they like and it's
3314 // easier to restore just those that we care about.
3315 store_parameter(dst, 0);
3316 store_parameter(dst_pos, 1);
3317 store_parameter(length, 2);
3318 store_parameter(src_pos, 3);
3319 store_parameter(src, 4);
3320
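        // checkcast_arraycopy takes (src_addr, dst_addr, length, super_check_offset, element_klass)
        // and returns 0 in rax on success, or ~(number of elements copied) if an element
        // fails the store check part-way through.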
3321 #ifndef _LP64
3322 __ movptr(tmp, dst_klass_addr);
3323 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
3324 __ push(tmp);
3325 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
3326 __ push(tmp);
3327 __ push(length);
3328 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3329 __ push(tmp);
3330 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3331 __ push(tmp);
3332
3333 __ call_VM_leaf(copyfunc_addr, 5);
3334 #else
3335         __ movl2ptr(length, length); // the upper 32 bits must be zero
3336
3337 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3338 assert_different_registers(c_rarg0, dst, dst_pos, length);
3339 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3340 assert_different_registers(c_rarg1, dst, length);
3341
3342 __ mov(c_rarg2, length);
3343 assert_different_registers(c_rarg2, dst);
3344
3345 #ifdef _WIN64
3346           // Allocate ABI space for the arguments, but be sure to keep the stack aligned
3347 __ subptr(rsp, 6*wordSize);
3348 __ load_klass(c_rarg3, dst, tmp_load_klass);
3349 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
3350 store_parameter(c_rarg3, 4);
3351 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
3352 __ call(RuntimeAddress(copyfunc_addr));
3353 __ addptr(rsp, 6*wordSize);
3354 #else
3355 __ load_klass(c_rarg4, dst, tmp_load_klass);
3356 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
3357 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
3358 __ call(RuntimeAddress(copyfunc_addr));
3359 #endif
3360
3361 #endif
3362
3363 #ifndef PRODUCT
3364 if (PrintC1Statistics) {
3365 Label failed;
3366 __ testl(rax, rax);
3367 __ jcc(Assembler::notZero, failed);
3368 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
3369 __ bind(failed);
3370 }
3371 #endif
3372
3373 __ testl(rax, rax);
3374 __ jcc(Assembler::zero, *stub->continuation());
3375
3376 #ifndef PRODUCT
3377 if (PrintC1Statistics) {
3378 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
3379 }
3380 #endif
3381
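        // At this point rax != 0, i.e. the stub stopped early. Flip the bits of the
        // returned value to recover how many elements were actually copied, so the
        // slow path can retry just the remaining elements.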
3382 __ mov(tmp, rax);
3383
3384 __ xorl(tmp, -1);
3385
3386 // Restore previously spilled arguments
3387 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3388 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3389 __ movptr (length, Address(rsp, 2*BytesPerWord));
3390 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3391 __ movptr (src, Address(rsp, 4*BytesPerWord));
3392
3393
3394 __ subl(length, tmp);
3395 __ addl(src_pos, tmp);
3396 __ addl(dst_pos, tmp);
3397 }
3398
3399 __ jmp(*stub->entry());
3400
3401 __ bind(cont);
3402 __ pop(dst);
3403 __ pop(src);
3404 }
3405 }
3406
3407 #ifdef ASSERT
3408 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3409 // Sanity check the known type with the incoming class. For the
3410 // primitive case the types must match exactly with src.klass and
3411 // dst.klass each exactly matching the default type. For the
3412 // object array case, if no type check is needed then either the
3413 // dst type is exactly the expected type and the src type is a
3414 // subtype which we can't check or src is the same array as dst
3415 // but not necessarily exactly of type default_type.
3416 Label known_ok, halt;
3417 __ mov_metadata(tmp, default_type->constant_encoding());
3418 #ifdef _LP64
3419 if (UseCompressedClassPointers) {
3420 __ encode_klass_not_null(tmp, rscratch1);
3421 }
3422 #endif
3423 if (basic_type != T_OBJECT) {
3424 __ cmp_klass(tmp, dst, tmp2);
3425 __ jcc(Assembler::notEqual, halt);
3426 __ cmp_klass(tmp, src, tmp2);
3427 __ jcc(Assembler::equal, known_ok);
3428 } else {
3429 __ cmp_klass(tmp, dst, tmp2);
3430 __ jcc(Assembler::equal, known_ok);
3431 __ cmpptr(src, dst);
3432 __ jcc(Assembler::equal, known_ok);
3433 }
3434 __ bind(halt);
3435 __ stop("incorrect type information in arraycopy");
3436 __ bind(known_ok);
3437 }
3438 #endif
3439
3440 #ifndef PRODUCT
3441 if (PrintC1Statistics) {
3442 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3443 }
3444 #endif
3445
3446 #ifdef _LP64
3447 assert_different_registers(c_rarg0, dst, dst_pos, length);
3448 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3449 assert_different_registers(c_rarg1, length);
3450 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3451 __ mov(c_rarg2, length);
3452
3453 #else
3454 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3455 store_parameter(tmp, 0);
3456 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3457 store_parameter(tmp, 1);
3458 store_parameter(length, 2);
3459 #endif // _LP64
3460
3461 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
3462 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
3463 const char *name;
3464 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
3465 __ call_VM_leaf(entry, 0);
3466
3467 __ bind(*stub->continuation());
3468 }
3469
3470 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3471 assert(op->crc()->is_single_cpu(), "crc must be register");
3472 assert(op->val()->is_single_cpu(), "byte value must be register");
3473 assert(op->result_opr()->is_single_cpu(), "result must be register");
3474 Register crc = op->crc()->as_register();
3475 Register val = op->val()->as_register();
3476 Register res = op->result_opr()->as_register();
3477
3478 assert_different_registers(val, crc, res);
3479
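  // CRC-32 keeps the running value in one's-complement form, so invert the crc before
  // and after the table-driven update.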
3480 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3481 __ notl(crc); // ~crc
3482 __ update_byte_crc32(crc, val, res);
3483 __ notl(crc); // ~crc
3484 __ mov(res, crc);
3485 }
3486
3487 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3488 Register obj = op->obj_opr()->as_register(); // may not be an oop
3489 Register hdr = op->hdr_opr()->as_register();
3490 Register lock = op->lock_opr()->as_register();
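  // With LM_MONITOR there is no inline fast path; always go to the runtime via the slow-path stub.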
3491 if (LockingMode == LM_MONITOR) {
3492 __ jmp(*op->stub()->entry());
3493 } else if (op->code() == lir_lock) {
3494 Register scratch = noreg;
3495 if (UseBiasedLocking || LockingMode == LM_LIGHTWEIGHT) {
3496 scratch = op->scratch_opr()->as_register();
3497 }
3498 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3499 // add debug info for NullPointerException only if one is possible
3500 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3501 if (op->info() != NULL) {
3502 add_debug_info_for_null_check(null_check_offset, op->info());
3503 }
3504 // done
3505 } else if (op->code() == lir_unlock) {
3506 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3507 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3508 } else {
3509 Unimplemented();
3510 }
3511 __ bind(*op->stub()->continuation());
3512 }
3513
3514 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
3515 Register obj = op->obj()->as_pointer_register();
3516 Register result = op->result_opr()->as_pointer_register();
3517
3518 CodeEmitInfo* info = op->info();
3519 if (info != NULL) {
3520 add_debug_info_for_null_check_here(info);
3521 }
3522
3523 #ifdef _LP64
3524 if (UseCompactObjectHeaders) {
3525 Register tmp = rscratch1;
3526 assert_different_registers(tmp, obj);
3527 assert_different_registers(tmp, result);
3528
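    // With compact object headers the compressed Klass* is stored in the upper bits of the mark word.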
3529     // Check whether we can take the (common) fast path; if obj is monitor-locked,
3529     // the mark points to the ObjectMonitor and the stub must recover the header.
3530 __ movq(result, Address(obj, oopDesc::mark_offset_in_bytes()));
3531 __ testb(result, markWord::monitor_value);
3532 __ jcc(Assembler::notZero, *op->stub()->entry());
3533 __ bind(*op->stub()->continuation());
3534 // Fast-path: shift and decode Klass*.
3535 __ shrq(result, markWord::klass_shift);
3536 __ decode_klass_not_null(result, tmp);
3537 } else if (UseCompressedClassPointers) {
3538 __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3539 __ decode_klass_not_null(result, rscratch1);
3540 } else
3541 #endif
3542 __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3543 }
3544
3545 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3546 ciMethod* method = op->profiled_method();
3547 int bci = op->profiled_bci();
3548 ciMethod* callee = op->profiled_callee();
3549 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3550
3551 // Update counter for all call types
3552 ciMethodData* md = method->method_data_or_null();
3553 assert(md != NULL, "Sanity");
3554 ciProfileData* data = md->bci_to_data(bci);
3555 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3556 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3557 Register mdo = op->mdo()->as_register();
3558 __ mov_metadata(mdo, md->constant_encoding());
3559 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3560 // Perform additional virtual call profiling for invokevirtual and
3561 // invokeinterface bytecodes
3562 if (op->should_profile_receiver_type()) {
3563 assert(op->recv()->is_single_cpu(), "recv must be allocated");
3564 Register recv = op->recv()->as_register();
3565 assert_different_registers(mdo, recv);
3566 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3567 ciKlass* known_klass = op->known_holder();
3568 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3569 // We know the type that will be seen at this call site; we can
3570 // statically update the MethodData* rather than needing to do
3571 // dynamic tests on the receiver type
3572
3573 // NOTE: we should probably put a lock around this search to
3574 // avoid collisions by concurrent compilations
3575 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3576 uint i;
3577 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3578 ciKlass* receiver = vc_data->receiver(i);
3579 if (known_klass->equals(receiver)) {
3580 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3581 __ addptr(data_addr, DataLayout::counter_increment);
3582 return;
3583 }
3584 }
3585
3586 // Receiver type not found in profile data; select an empty slot
3587
3588 // Note that this is less efficient than it should be because it
3589 // always does a write to the receiver part of the
3590 // VirtualCallData rather than just the first time
3591 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3592 ciKlass* receiver = vc_data->receiver(i);
3593 if (receiver == NULL) {
3594 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3595 __ mov_metadata(recv_addr, known_klass->constant_encoding());
3596 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3597 __ addptr(data_addr, DataLayout::counter_increment);
3598 return;
3599 }
3600 }
3601 } else {
3602 __ load_klass(recv, recv, tmp_load_klass);
3603 Label update_done;
3604 type_profile_helper(mdo, md, data, recv, &update_done);
3605 // Receiver did not match any saved receiver and there is no empty row for it.
3606 // Increment total counter to indicate polymorphic case.
3607 __ addptr(counter_addr, DataLayout::counter_increment);
3608
3609 __ bind(update_done);
3610 }
3611 } else {
3612 // Static call
3613 __ addptr(counter_addr, DataLayout::counter_increment);
3614 }
3615 }
3616
3617 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3618 Register obj = op->obj()->as_register();
3619 Register tmp = op->tmp()->as_pointer_register();
3620 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3621 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3622 ciKlass* exact_klass = op->exact_klass();
3623 intptr_t current_klass = op->current_klass();
3624 bool not_null = op->not_null();
3625 bool no_conflict = op->no_conflict();
3626
3627 Label update, next, none;
3628
3629 bool do_null = !not_null;
3630 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3631 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3632
3633 assert(do_null || do_update, "why are we here?");
3634 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
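  // The profile cell at mdo_addr holds a Klass* whose low bits are reused as flags
  // (TypeEntries::null_seen, type_unknown). XOR-ing the observed klass with the cell and
  // masking with type_klass_mask therefore yields zero when that klass was already recorded.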
3635
3636 __ verify_oop(obj);
3637
3638 #ifdef ASSERT
3639 if (obj == tmp) {
3640 #ifdef _LP64
3641 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
3642 #else
3643 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
3644 #endif
3645 } else {
3646 #ifdef _LP64
3647 assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
3648 #else
3649 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
3650 #endif
3651 }
3652 #endif
3653 if (do_null) {
3654 __ testptr(obj, obj);
3655 __ jccb(Assembler::notZero, update);
3656 if (!TypeEntries::was_null_seen(current_klass)) {
3657 __ testptr(mdo_addr, TypeEntries::null_seen);
3658 #ifndef ASSERT
3659 __ jccb(Assembler::notZero, next); // already set
3660 #else
3661 __ jcc(Assembler::notZero, next); // already set
3662 #endif
3663 // atomic update to prevent overwriting Klass* with 0
3664 __ lock();
3665 __ orptr(mdo_addr, TypeEntries::null_seen);
3666 }
3667 if (do_update) {
3668 __ jmp(next);
3669 }
3670 #ifdef ASSERT
3671 } else {
3672 __ testptr(obj, obj);
3673 __ jcc(Assembler::notZero, update);
3674     __ stop("unexpected null obj");
3675 #endif
3676 }
3677
3678 __ bind(update);
3679
3680 if (do_update) {
3681 #ifdef ASSERT
3682 if (exact_klass != NULL) {
3683 Label ok;
3684 __ load_klass(tmp, obj, tmp_load_klass);
3685 __ push(tmp);
3686 __ mov_metadata(tmp, exact_klass->constant_encoding());
3687 __ cmpptr(tmp, Address(rsp, 0));
3688 __ jcc(Assembler::equal, ok);
3689 __ stop("exact klass and actual klass differ");
3690 __ bind(ok);
3691 __ pop(tmp);
3692 }
3693 #endif
3694 if (!no_conflict) {
3695 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
3696 if (exact_klass != NULL) {
3697 __ mov_metadata(tmp, exact_klass->constant_encoding());
3698 } else {
3699 __ load_klass(tmp, obj, tmp_load_klass);
3700 }
3701 #ifdef _LP64
3702 __ mov(rscratch1, tmp); // save original value before XOR
3703 #endif
3704 __ xorptr(tmp, mdo_addr);
3705 __ testptr(tmp, TypeEntries::type_klass_mask);
3706 // klass seen before, nothing to do. The unknown bit may have been
3707 // set already but no need to check.
3708 __ jccb(Assembler::zero, next);
3709
3710 __ testptr(tmp, TypeEntries::type_unknown);
3711 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3712
3713 if (TypeEntries::is_type_none(current_klass)) {
3714 __ testptr(mdo_addr, TypeEntries::type_mask);
3715 __ jccb(Assembler::zero, none);
3716 #ifdef _LP64
3717           // There is a chance that the checks above fail (they re-read the profiling
3718           // data from memory) if another thread has just recorded this obj's klass
3719           // in the profile.
3720 __ mov(tmp, rscratch1); // get back original value before XOR
3721 __ xorptr(tmp, mdo_addr);
3722 __ testptr(tmp, TypeEntries::type_klass_mask);
3723 __ jccb(Assembler::zero, next);
3724 #endif
3725 }
3726 } else {
3727 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3728 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
3729
3730 __ testptr(mdo_addr, TypeEntries::type_unknown);
3731 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3732 }
3733
3734       // Different from what was recorded before; we cannot keep an accurate profile.
3735 __ orptr(mdo_addr, TypeEntries::type_unknown);
3736
3737 if (TypeEntries::is_type_none(current_klass)) {
3738 __ jmpb(next);
3739
3740 __ bind(none);
3741 // first time here. Set profile type.
3742 __ movptr(mdo_addr, tmp);
3743 #ifdef ASSERT
3744 __ andptr(tmp, TypeEntries::type_klass_mask);
3745 __ verify_klass_ptr(tmp);
3746 #endif
3747 }
3748 } else {
3749 // There's a single possible klass at this profile point
3750 assert(exact_klass != NULL, "should be");
3751 if (TypeEntries::is_type_none(current_klass)) {
3752 __ mov_metadata(tmp, exact_klass->constant_encoding());
3753 __ xorptr(tmp, mdo_addr);
3754 __ testptr(tmp, TypeEntries::type_klass_mask);
3755 #ifdef ASSERT
3756 __ jcc(Assembler::zero, next);
3757
3758 {
3759 Label ok;
3760 __ push(tmp);
3761 __ testptr(mdo_addr, TypeEntries::type_mask);
3762 __ jcc(Assembler::zero, ok);
3763 // may have been set by another thread
3764 __ mov_metadata(tmp, exact_klass->constant_encoding());
3765 __ xorptr(tmp, mdo_addr);
3766 __ testptr(tmp, TypeEntries::type_mask);
3767 __ jcc(Assembler::zero, ok);
3768
3769 __ stop("unexpected profiling mismatch");
3770 __ bind(ok);
3771 __ pop(tmp);
3772 }
3773 #else
3774 __ jccb(Assembler::zero, next);
3775 #endif
3776 // first time here. Set profile type.
3777 __ movptr(mdo_addr, tmp);
3778 #ifdef ASSERT
3779 __ andptr(tmp, TypeEntries::type_klass_mask);
3780 __ verify_klass_ptr(tmp);
3781 #endif
3782 } else {
3783 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3784 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3785
3786 __ testptr(mdo_addr, TypeEntries::type_unknown);
3787 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3788
3789 __ orptr(mdo_addr, TypeEntries::type_unknown);
3790 }
3791 }
3792 }
3793 __ bind(next);
3794 }
3795
3796 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3797 Unimplemented();
3798 }
3799
3800
3801 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3802 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3803 }
3804
3805
3806 void LIR_Assembler::align_backward_branch_target() {
3807 __ align(BytesPerWord);
3808 }
3809
3810
3811 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3812 if (left->is_single_cpu()) {
3813 __ negl(left->as_register());
3814 move_regs(left->as_register(), dest->as_register());
3815
3816 } else if (left->is_double_cpu()) {
3817 Register lo = left->as_register_lo();
3818 #ifdef _LP64
3819 Register dst = dest->as_register_lo();
3820 __ movptr(dst, lo);
3821 __ negptr(dst);
3822 #else
3823 Register hi = left->as_register_hi();
3824 __ lneg(hi, lo);
3825 if (dest->as_register_lo() == hi) {
3826 assert(dest->as_register_hi() != lo, "destroying register");
3827 move_regs(hi, dest->as_register_hi());
3828 move_regs(lo, dest->as_register_lo());
3829 } else {
3830 move_regs(lo, dest->as_register_lo());
3831 move_regs(hi, dest->as_register_hi());
3832 }
3833 #endif // _LP64
3834
3835 } else if (dest->is_single_xmm()) {
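    // Without AVX512VL, the upper-bank registers xmm16-xmm31 can only be encoded in
    // 512-bit EVEX instructions, so the sign flip is done with a 512-bit vpxor against
    // the mask that the LIRGenerator materialized in tmp.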
3836 #ifdef _LP64
3837 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
3838 assert(tmp->is_valid(), "need temporary");
3839 assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg());
3840 __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2);
3841 }
3842 else
3843 #endif
3844 {
3845 assert(!tmp->is_valid(), "do not need temporary");
3846 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
3847 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
3848 }
3849 __ xorps(dest->as_xmm_float_reg(),
3850 ExternalAddress((address)float_signflip_pool));
3851 }
3852 } else if (dest->is_double_xmm()) {
3853 #ifdef _LP64
3854 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
3855 assert(tmp->is_valid(), "need temporary");
3856 assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg());
3857 __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2);
3858 }
3859 else
3860 #endif
3861 {
3862 assert(!tmp->is_valid(), "do not need temporary");
3863 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
3864 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
3865 }
3866 __ xorpd(dest->as_xmm_double_reg(),
3867 ExternalAddress((address)double_signflip_pool));
3868 }
3869 #ifndef _LP64
3870 } else if (left->is_single_fpu() || left->is_double_fpu()) {
3871 assert(left->fpu() == 0, "arg must be on TOS");
3872 assert(dest->fpu() == 0, "dest must be TOS");
3873 __ fchs();
3874 #endif // !_LP64
3875
3876 } else {
3877 ShouldNotReachHere();
3878 }
3879 }
3880
3881
3882 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3883 assert(src->is_address(), "must be an address");
3884 assert(dest->is_register(), "must be a register");
3885
3886 PatchingStub* patch = NULL;
3887 if (patch_code != lir_patch_none) {
3888 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3889 }
3890
3891 Register reg = dest->as_pointer_register();
3892 LIR_Address* addr = src->as_address_ptr();
3893 __ lea(reg, as_Address(addr));
3894
3895 if (patch != NULL) {
3896 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3897 }
3898 }
3899
3900
3901
3902 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3903 assert(!tmp->is_valid(), "don't need temporary");
3904 __ call(RuntimeAddress(dest));
3905 if (info != NULL) {
3906 add_call_info_here(info);
3907 }
3908 }
3909
3910
3911 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3912 assert(type == T_LONG, "only for volatile long fields");
3913
3914 if (info != NULL) {
3915 add_debug_info_for_null_check_here(info);
3916 }
3917
3918 if (src->is_double_xmm()) {
3919 if (dest->is_double_cpu()) {
3920 #ifdef _LP64
3921 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3922 #else
3923 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3924 __ psrlq(src->as_xmm_double_reg(), 32);
3925 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3926 #endif // _LP64
3927 } else if (dest->is_double_stack()) {
3928 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3929 } else if (dest->is_address()) {
3930 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3931 } else {
3932 ShouldNotReachHere();
3933 }
3934
3935 } else if (dest->is_double_xmm()) {
3936 if (src->is_double_stack()) {
3937 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3938 } else if (src->is_address()) {
3939 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3940 } else {
3941 ShouldNotReachHere();
3942 }
3943
3944 #ifndef _LP64
3945 } else if (src->is_double_fpu()) {
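    // On 32-bit x86 the x87 unit can move all 64 bits in a single instruction: fild_d/fistp_d
    // load and store the long exactly (the 80-bit format has a 64-bit significand), which
    // gives the atomicity required for volatile long accesses.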
3946 assert(src->fpu_regnrLo() == 0, "must be TOS");
3947 if (dest->is_double_stack()) {
3948 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
3949 } else if (dest->is_address()) {
3950 __ fistp_d(as_Address(dest->as_address_ptr()));
3951 } else {
3952 ShouldNotReachHere();
3953 }
3954
3955 } else if (dest->is_double_fpu()) {
3956 assert(dest->fpu_regnrLo() == 0, "must be TOS");
3957 if (src->is_double_stack()) {
3958 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
3959 } else if (src->is_address()) {
3960 __ fild_d(as_Address(src->as_address_ptr()));
3961 } else {
3962 ShouldNotReachHere();
3963 }
3964 #endif // !_LP64
3965
3966 } else {
3967 ShouldNotReachHere();
3968 }
3969 }
3970
3971 #ifdef ASSERT
3972 // emit run-time assertion
3973 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3974 assert(op->code() == lir_assert, "must be");
3975
3976 if (op->in_opr1()->is_valid()) {
3977 assert(op->in_opr2()->is_valid(), "both operands must be valid");
3978 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3979 } else {
3980 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3981 assert(op->condition() == lir_cond_always, "no other conditions allowed");
3982 }
3983
3984 Label ok;
3985 if (op->condition() != lir_cond_always) {
3986 Assembler::Condition acond = Assembler::zero;
3987 switch (op->condition()) {
3988 case lir_cond_equal: acond = Assembler::equal; break;
3989 case lir_cond_notEqual: acond = Assembler::notEqual; break;
3990 case lir_cond_less: acond = Assembler::less; break;
3991 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
3992 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3993 case lir_cond_greater: acond = Assembler::greater; break;
3994 case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
3995 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
3996 default: ShouldNotReachHere();
3997 }
3998 __ jcc(acond, ok);
3999 }
4000 if (op->halt()) {
4001 const char* str = __ code_string(op->msg());
4002 __ stop(str);
4003 } else {
4004 breakpoint();
4005 }
4006 __ bind(ok);
4007 }
4008 #endif
4009
4010 void LIR_Assembler::membar() {
4011   // x86 is TSO (total store order); only StoreLoad reordering needs an explicit fence.
4012 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
4013 }
4014
4015 void LIR_Assembler::membar_acquire() {
4016 // No x86 machines currently require load fences
4017 }
4018
4019 void LIR_Assembler::membar_release() {
4020 // No x86 machines currently require store fences
4021 }
4022
4023 void LIR_Assembler::membar_loadload() {
4024 // no-op
4025 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
4026 }
4027
4028 void LIR_Assembler::membar_storestore() {
4029 // no-op
4030 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
4031 }
4032
4033 void LIR_Assembler::membar_loadstore() {
4034 // no-op
4035 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
4036 }
4037
4038 void LIR_Assembler::membar_storeload() {
4039 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4040 }
4041
4042 void LIR_Assembler::on_spin_wait() {
4043   __ pause();
4044 }
4045
4046 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4047 assert(result_reg->is_register(), "check");
4048 #ifdef _LP64
4049   // On x86_64 the current JavaThread* is cached in r15.
4050   __ mov(result_reg->as_register(), r15_thread);
4051 #else
4052 __ get_thread(result_reg->as_register());
4053 #endif // _LP64
4054 }
4055
4056
4057 void LIR_Assembler::peephole(LIR_List*) {
4058 // do nothing for now
4059 }
4060
4061 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4062 assert(data == dest, "xchg/xadd uses only 2 operands");
4063
4064 if (data->type() == T_INT) {
4065 if (code == lir_xadd) {
4066 __ lock();
4067 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4068 } else {
4069 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4070 }
4071 } else if (data->is_oop()) {
4072 assert (code == lir_xchg, "xadd for oops");
4073 Register obj = data->as_register();
4074 #ifdef _LP64
4075 if (UseCompressedOops) {
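      // The field holds a 32-bit narrow oop: exchange the encoded value and decode
      // the previous oop that comes back.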
4076 __ encode_heap_oop(obj);
4077 __ xchgl(obj, as_Address(src->as_address_ptr()));
4078 __ decode_heap_oop(obj);
4079 } else {
4080 __ xchgptr(obj, as_Address(src->as_address_ptr()));
4081 }
4082 #else
4083 __ xchgl(obj, as_Address(src->as_address_ptr()));
4084 #endif
4085 } else if (data->type() == T_LONG) {
4086 #ifdef _LP64
4087 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
4088 if (code == lir_xadd) {
4089 __ lock();
4090 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
4091 } else {
4092 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
4093 }
4094 #else
4095 ShouldNotReachHere();
4096 #endif
4097 } else {
4098 ShouldNotReachHere();
4099 }
4100 }
4101
4102 #undef __
--- EOF ---