1 /*
2 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciObjArrayKlass.hpp"
37 #include "ci/ciTypeArrayKlass.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42
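// `__` prepends each operation to the current LIR list; with ASSERT the
// generator's source file and line are recorded to ease debugging of the
// emitted LIR.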
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
// On x86 an item used in a byte operation must be loaded into a byte
// register; AArch64 has no such restriction, so an ordinary load suffices.
50 void LIRItem::load_byte_item() {
51 load_item();
52 }
53
54
55 void LIRItem::load_nonconstant() {
56 LIR_Opr r = value()->operand();
57 if (r->is_constant()) {
58 _result = r;
59 } else {
60 load_item();
61 }
62 }
63
64 //--------------------------------------------------------------
65 // LIRGenerator
66 //--------------------------------------------------------------
67
68
69 LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
70 LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::r3_opr; }
71 LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
72 LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
73 LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
74 LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
75 LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
76 LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::r0_opr; }
77 LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; }
78
79
80 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
81 LIR_Opr opr;
82 switch (type->tag()) {
83 case intTag: opr = FrameMap::r0_opr; break;
84 case objectTag: opr = FrameMap::r0_oop_opr; break;
85 case longTag: opr = FrameMap::long0_opr; break;
86 case floatTag: opr = FrameMap::fpu0_float_opr; break;
87 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
88
89 case addressTag:
90 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
91 }
92
93 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
94 return opr;
95 }
96
97
98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
99 LIR_Opr reg = new_register(T_INT);
100 set_vreg_flag(reg, LIRGenerator::byte_reg);
101 return reg;
102 }
103
104
105 //--------- loading items into registers --------------------------------
106
107
108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
109 if (v->type()->as_IntConstant() != NULL) {
110 return v->type()->as_IntConstant()->value() == 0L;
111 } else if (v->type()->as_LongConstant() != NULL) {
112 return v->type()->as_LongConstant()->value() == 0L;
113 } else if (v->type()->as_ObjectConstant() != NULL) {
114 return v->type()->as_ObjectConstant()->value()->is_null_object();
115 } else {
116 return false;
117 }
118 }
119
120 bool LIRGenerator::can_inline_as_constant(Value v) const {
121 // FIXME: Just a guess
122 if (v->type()->as_IntConstant() != NULL) {
123 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
124 } else if (v->type()->as_LongConstant() != NULL) {
125 return v->type()->as_LongConstant()->value() == 0L;
126 } else if (v->type()->as_ObjectConstant() != NULL) {
127 return v->type()->as_ObjectConstant()->value()->is_null_object();
128 } else {
129 return false;
130 }
131 }
132
133
134 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
135
136
137 LIR_Opr LIRGenerator::safepoint_poll_register() {
138 return LIR_OprFact::illegalOpr;
139 }
140
141
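// Construct a LIR_Address for base + (index << shift) + disp. Constant
// index values are folded into the displacement when the result still
// fits the addressing mode; e.g. (illustrative values) a constant index
// of 6 with shift == 2 and disp == 16 folds into base + 40.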
142 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
143 int shift, int disp, BasicType type) {
144 assert(base->is_register(), "must be");
145 intx large_disp = disp;
146
147 // accumulate fixed displacements
148 if (index->is_constant()) {
149 LIR_Const *constant = index->as_constant_ptr();
150 if (constant->type() == T_INT) {
151 large_disp += ((intx)index->as_jint()) << shift;
152 } else {
153 assert(constant->type() == T_LONG, "should be");
154 jlong c = index->as_jlong() << shift;
155 if ((jlong)((jint)c) == c) {
156 large_disp += c;
157 index = LIR_OprFact::illegalOpr;
158 } else {
159 LIR_Opr tmp = new_register(T_LONG);
160 __ move(index, tmp);
161 index = tmp;
162 // apply shift and displacement below
163 }
164 }
165 }
166
167 if (index->is_register()) {
168 // apply the shift and accumulate the displacement
169 if (shift > 0) {
170 LIR_Opr tmp = new_pointer_register();
171 __ shift_left(index, shift, tmp);
172 index = tmp;
173 }
174 if (large_disp != 0) {
175 LIR_Opr tmp = new_pointer_register();
176 if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
177 __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
178 index = tmp;
179 } else {
180 __ move(LIR_OprFact::intptrConst(large_disp), tmp);
181 __ add(tmp, index, tmp);
182 index = tmp;
183 }
184 large_disp = 0;
185 }
186 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
187 // index is illegal so replace it with the displacement loaded into a register
188 index = new_pointer_register();
189 __ move(LIR_OprFact::intptrConst(large_disp), index);
190 large_disp = 0;
191 }
192
193 // at this point we either have base + index or base + displacement
194 if (large_disp == 0 && index->is_register()) {
195 return new LIR_Address(base, index, type);
196 } else {
197 assert(Address::offset_ok_for_immed(large_disp, shift), "failed for large_disp: " INTPTR_FORMAT " and shift %d", large_disp, shift);
198 return new LIR_Address(base, large_disp, type);
199 }
200 }
201
202 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
203 BasicType type) {
204 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
205 int elem_size = type2aelembytes(type);
206 int shift = exact_log2(elem_size);
207 return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
208 }
209
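// Materialize an integer constant: keep it as an immediate operand when
// it is encodable as a logical immediate, otherwise load it into a
// fresh register.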
210 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
211 LIR_Opr r;
212 if (type == T_LONG) {
213 r = LIR_OprFact::longConst(x);
214 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
215 LIR_Opr tmp = new_register(type);
216 __ move(r, tmp);
217 return tmp;
218 }
219 } else if (type == T_INT) {
220 r = LIR_OprFact::intConst(x);
221 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
225 LIR_Opr tmp = new_register(type);
226 __ move(r, tmp);
227 return tmp;
228 }
229 } else {
230 ShouldNotReachHere();
231 r = NULL; // unreachable
232 }
233 return r;
234 }
235
236
237
238 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
239 LIR_Opr pointer = new_pointer_register();
240 __ move(LIR_OprFact::intptrConst(counter), pointer);
241 LIR_Address* addr = new LIR_Address(pointer, type);
242 increment_counter(addr, step);
243 }
244
245
246 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
247 LIR_Opr imm = NULL;
248 switch(addr->type()) {
249 case T_INT:
250 imm = LIR_OprFact::intConst(step);
251 break;
252 case T_LONG:
253 imm = LIR_OprFact::longConst(step);
254 break;
255 default:
256 ShouldNotReachHere();
257 }
258 LIR_Opr reg = new_register(addr->type());
259 __ load(addr, reg);
260 __ add(reg, imm, reg);
261 __ store(reg, addr);
262 }
263
264 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
265 LIR_Opr reg = new_register(T_INT);
266 __ load(generate_address(base, disp, T_INT), reg, info);
267 __ cmp(condition, reg, LIR_OprFact::intConst(c));
268 }
269
270 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
271 LIR_Opr reg1 = new_register(T_INT);
272 __ load(generate_address(base, disp, type), reg1, info);
273 __ cmp(condition, reg, reg1);
274 }
275
276
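// Strength-reduce a multiplication by a constant of the form 2^n + 1 or
// 2^n - 1 into a shift plus an add/subtract: e.g. left * 9 becomes
// (left << 3) + left, and left * 7 becomes (left << 3) - left.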
277 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
278
279 if (is_power_of_2(c - 1)) {
280 __ shift_left(left, exact_log2(c - 1), tmp);
281 __ add(tmp, left, result);
282 return true;
283 } else if (is_power_of_2(c + 1)) {
284 __ shift_left(left, exact_log2(c + 1), tmp);
285 __ sub(tmp, left, result);
286 return true;
287 } else {
288 return false;
289 }
290 }
291
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
293 BasicType type = item->type();
294 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
295 }
296
297 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
298 LIR_Opr tmp1 = new_register(objectType);
299 LIR_Opr tmp2 = new_register(objectType);
300 LIR_Opr tmp3 = new_register(objectType);
301 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
302 }
303
304 //----------------------------------------------------------------------
305 // visitor functions
306 //----------------------------------------------------------------------
307
308 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
309 assert(x->is_pinned(),"");
310 LIRItem obj(x->obj(), this);
311 obj.load_item();
312
313 set_no_result(x);
314
315 // "lock" stores the address of the monitor stack slot, so this is not an oop
316 LIR_Opr lock = new_register(T_INT);
317 // Need a scratch register for biased locking
318 LIR_Opr scratch = LIR_OprFact::illegalOpr;
319 if (UseBiasedLocking) {
320 scratch = new_register(T_INT);
321 }
322
323 CodeEmitInfo* info_for_exception = NULL;
324 if (x->needs_null_check()) {
325 info_for_exception = state_for(x);
326 }
327 // this CodeEmitInfo must not have the xhandlers because here the
328 // object is already locked (xhandlers expect object to be unlocked)
329 CodeEmitInfo* info = state_for(x, x->state(), true);
330 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
331 x->monitor_no(), info_for_exception, info);
332 }
333
334
335 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
336 assert(x->is_pinned(),"");
337
338 LIRItem obj(x->obj(), this);
339 obj.dont_load_item();
340
341 LIR_Opr lock = new_register(T_INT);
342 LIR_Opr obj_temp = new_register(T_INT);
343 set_no_result(x);
344 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
345 }
346
347
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}
356
357 // for _fadd, _fmul, _fsub, _fdiv, _frem
358 // _dadd, _dmul, _dsub, _ddiv, _drem
359 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
360
361 if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
362 // float remainder is implemented as a direct call into the runtime
363 LIRItem right(x->x(), this);
364 LIRItem left(x->y(), this);
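    // Note the swapped names: 'right' is bound to the dividend (x->x())
    // and 'left' to the divisor (x->y()). The divisor is forced into the
    // second C argument and the dividend moved into the first, matching
    // SharedRuntime::frem/drem(dividend, divisor).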
365
366 BasicTypeList signature(2);
367 if (x->op() == Bytecodes::_frem) {
368 signature.append(T_FLOAT);
369 signature.append(T_FLOAT);
370 } else {
371 signature.append(T_DOUBLE);
372 signature.append(T_DOUBLE);
373 }
374 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
375
376 const LIR_Opr result_reg = result_register_for(x->type());
377 left.load_item_force(cc->at(1));
378 right.load_item();
379
380 __ move(right.result(), cc->at(0));
381
382 address entry;
383 if (x->op() == Bytecodes::_frem) {
384 entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
385 } else {
386 entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
387 }
388
389 LIR_Opr result = rlock_result(x);
390 __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
391 __ move(result_reg, result);
392
393 return;
394 }
395
396 LIRItem left(x->x(), this);
397 LIRItem right(x->y(), this);
400
401 // Always load right hand side.
402 right.load_item();
403
  if (!left.is_register()) {
    left.load_item();
  }
406
407 LIR_Opr reg = rlock(x);
408
409 arithmetic_op_fpu(x->op(), reg, left.result(), right.result());
410
411 set_result(x, round_item(reg));
412 }
413
414 // for _ladd, _lmul, _lsub, _ldiv, _lrem
415 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
416
  // Missing: test whether the instruction is commutative and whether we should swap the operands.
418 LIRItem left(x->x(), this);
419 LIRItem right(x->y(), this);
420
421 if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
422
423 left.load_item();
424 bool need_zero_check = true;
425 if (right.is_constant()) {
426 jlong c = right.get_jlong_constant();
427 // no need to do div-by-zero check if the divisor is a non-zero constant
428 if (c != 0) need_zero_check = false;
429 // do not load right if the divisor is a power-of-2 constant
430 if (c > 0 && is_power_of_2(c)) {
431 right.dont_load_item();
432 } else {
433 right.load_item();
434 }
435 } else {
436 right.load_item();
437 }
438 if (need_zero_check) {
439 CodeEmitInfo* info = state_for(x);
440 __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
441 __ branch(lir_cond_equal, new DivByZeroStub(info));
442 }
443
444 rlock_result(x);
445 switch (x->op()) {
446 case Bytecodes::_lrem:
447 __ rem (left.result(), right.result(), x->operand());
448 break;
449 case Bytecodes::_ldiv:
450 __ div (left.result(), right.result(), x->operand());
451 break;
452 default:
453 ShouldNotReachHere();
454 break;
    }
  } else {
459 assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
460 "expect lmul, ladd or lsub");
461 // add, sub, mul
462 left.load_item();
463 if (! right.is_register()) {
464 if (x->op() == Bytecodes::_lmul
465 || ! right.is_constant()
466 || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
467 right.load_item();
468 } else { // add, sub
469 assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
470 // don't load constants to save register
471 right.load_nonconstant();
472 }
473 }
474 rlock_result(x);
475 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
476 }
477 }
478
479 // for: _iadd, _imul, _isub, _idiv, _irem
480 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
481
482 // Test if instr is commutative and if we should swap
483 LIRItem left(x->x(), this);
484 LIRItem right(x->y(), this);
485 LIRItem* left_arg = &left;
486 LIRItem* right_arg = &right;
487 if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
489 left_arg = &right;
490 right_arg = &left;
491 }
492
493 left_arg->load_item();
494
495 // do not need to load right, as we can handle stack and constants
496 if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
497
498 rlock_result(x);
499 bool need_zero_check = true;
500 if (right.is_constant()) {
501 jint c = right.get_jint_constant();
502 // no need to do div-by-zero check if the divisor is a non-zero constant
503 if (c != 0) need_zero_check = false;
504 // do not load right if the divisor is a power-of-2 constant
505 if (c > 0 && is_power_of_2(c)) {
506 right_arg->dont_load_item();
507 } else {
508 right_arg->load_item();
509 }
510 } else {
511 right_arg->load_item();
512 }
513 if (need_zero_check) {
514 CodeEmitInfo* info = state_for(x);
515 __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
516 __ branch(lir_cond_equal, new DivByZeroStub(info));
517 }
518
519 LIR_Opr ill = LIR_OprFact::illegalOpr;
520 if (x->op() == Bytecodes::_irem) {
521 __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
522 } else if (x->op() == Bytecodes::_idiv) {
523 __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
524 }
525
526 } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
527 if (right.is_constant()
528 && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
529 right.load_nonconstant();
530 } else {
531 right.load_item();
532 }
533 rlock_result(x);
534 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
535 } else {
536 assert (x->op() == Bytecodes::_imul, "expect imul");
537 if (right.is_constant()) {
538 jint c = right.get_jint_constant();
539 if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
540 right_arg->dont_load_item();
541 } else {
542 // Cannot use constant op.
543 right_arg->load_item();
544 }
545 } else {
546 right.load_item();
547 }
548 rlock_result(x);
549 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
550 }
551 }
552
553 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // When an operand with use count 1 is the left operand, it is likely
  // that no move is necessary for the 2-operand LIR form.
556 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
557 x->swap_operands();
558 }
559
560 ValueTag tag = x->type()->tag();
561 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
562 switch (tag) {
563 case floatTag:
564 case doubleTag: do_ArithmeticOp_FPU(x); return;
565 case longTag: do_ArithmeticOp_Long(x); return;
566 case intTag: do_ArithmeticOp_Int(x); return;
567 default: ShouldNotReachHere(); return;
568 }
569 }
570
571 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
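// The shift distance is masked to the low 5 bits for int shifts and the
// low 6 bits for long shifts, as the JVM specification requires.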
572 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
573
574 LIRItem left(x->x(), this);
575 LIRItem right(x->y(), this);
576
577 left.load_item();
578
579 rlock_result(x);
580 if (right.is_constant()) {
581 right.dont_load_item();
582
583 switch (x->op()) {
584 case Bytecodes::_ishl: {
585 int c = right.get_jint_constant() & 0x1f;
586 __ shift_left(left.result(), c, x->operand());
587 break;
588 }
589 case Bytecodes::_ishr: {
590 int c = right.get_jint_constant() & 0x1f;
591 __ shift_right(left.result(), c, x->operand());
592 break;
593 }
594 case Bytecodes::_iushr: {
595 int c = right.get_jint_constant() & 0x1f;
596 __ unsigned_shift_right(left.result(), c, x->operand());
597 break;
598 }
599 case Bytecodes::_lshl: {
600 int c = right.get_jint_constant() & 0x3f;
601 __ shift_left(left.result(), c, x->operand());
602 break;
603 }
604 case Bytecodes::_lshr: {
605 int c = right.get_jint_constant() & 0x3f;
606 __ shift_right(left.result(), c, x->operand());
607 break;
608 }
609 case Bytecodes::_lushr: {
610 int c = right.get_jint_constant() & 0x3f;
611 __ unsigned_shift_right(left.result(), c, x->operand());
612 break;
613 }
614 default:
615 ShouldNotReachHere();
616 }
617 } else {
618 right.load_item();
619 LIR_Opr tmp = new_register(T_INT);
620 switch (x->op()) {
621 case Bytecodes::_ishl: {
622 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
623 __ shift_left(left.result(), tmp, x->operand(), tmp);
624 break;
625 }
626 case Bytecodes::_ishr: {
627 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
628 __ shift_right(left.result(), tmp, x->operand(), tmp);
629 break;
630 }
631 case Bytecodes::_iushr: {
632 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
633 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
634 break;
635 }
636 case Bytecodes::_lshl: {
637 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
638 __ shift_left(left.result(), tmp, x->operand(), tmp);
639 break;
640 }
641 case Bytecodes::_lshr: {
642 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
643 __ shift_right(left.result(), tmp, x->operand(), tmp);
644 break;
645 }
646 case Bytecodes::_lushr: {
647 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
648 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
649 break;
650 }
651 default:
652 ShouldNotReachHere();
653 }
654 }
655 }
656
657 // _iand, _land, _ior, _lor, _ixor, _lxor
658 void LIRGenerator::do_LogicOp(LogicOp* x) {
659
660 LIRItem left(x->x(), this);
661 LIRItem right(x->y(), this);
662
663 left.load_item();
664
665 rlock_result(x);
666 if (right.is_constant()
667 && ((right.type()->tag() == intTag
668 && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
669 || (right.type()->tag() == longTag
670 && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
671 right.dont_load_item();
672 } else {
673 right.load_item();
674 }
675 switch (x->op()) {
676 case Bytecodes::_iand:
677 case Bytecodes::_land:
678 __ logical_and(left.result(), right.result(), x->operand()); break;
679 case Bytecodes::_ior:
680 case Bytecodes::_lor:
681 __ logical_or (left.result(), right.result(), x->operand()); break;
682 case Bytecodes::_ixor:
683 case Bytecodes::_lxor:
684 __ logical_xor(left.result(), right.result(), x->operand()); break;
685 default: Unimplemented();
686 }
687 }
688
689 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
690 void LIRGenerator::do_CompareOp(CompareOp* x) {
691 LIRItem left(x->x(), this);
692 LIRItem right(x->y(), this);
693 ValueTag tag = x->x()->type()->tag();
694 if (tag == longTag) {
695 left.set_destroys_register();
696 }
697 left.load_item();
698 right.load_item();
699 LIR_Opr reg = rlock_result(x);
700
701 if (x->x()->type()->is_float_kind()) {
702 Bytecodes::Code code = x->op();
703 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
704 } else if (x->x()->type()->tag() == longTag) {
705 __ lcmp2int(left.result(), right.result(), reg);
706 } else {
707 Unimplemented();
708 }
709 }
710
711 LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
712 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
713 new_value.load_item();
714 cmp_value.load_item();
715 LIR_Opr result = new_register(T_INT);
716 if (is_reference_type(type)) {
717 __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
718 } else if (type == T_INT) {
719 __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
720 } else if (type == T_LONG) {
721 __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
722 } else {
723 ShouldNotReachHere();
725 }
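  // The cas_* operations above leave a flag in r8 (rscratch1): 0 on
  // success, 1 on failure. Invert it so the returned result is 1 when
  // the CAS succeeded.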
726 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
727 return result;
728 }
729
730 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
731 bool is_oop = is_reference_type(type);
732 LIR_Opr result = new_register(type);
733 value.load_item();
734 assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
735 LIR_Opr tmp = new_register(T_INT);
736 __ xchg(addr, value.result(), result, tmp);
737 return result;
738 }
739
740 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
741 LIR_Opr result = new_register(type);
742 value.load_item();
743 assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
744 LIR_Opr tmp = new_register(T_INT);
745 __ xadd(addr, value.result(), result, tmp);
746 return result;
747 }
748
749 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
750 assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
751 if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
752 x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
753 x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
754 x->id() == vmIntrinsics::_dlog10) {
755 do_LibmIntrinsic(x);
756 return;
757 }
758 switch (x->id()) {
759 case vmIntrinsics::_dabs:
760 case vmIntrinsics::_dsqrt: {
761 assert(x->number_of_arguments() == 1, "wrong type");
762 LIRItem value(x->argument_at(0), this);
763 value.load_item();
764 LIR_Opr dst = rlock_result(x);
765
766 switch (x->id()) {
767 case vmIntrinsics::_dsqrt: {
768 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
769 break;
770 }
771 case vmIntrinsics::_dabs: {
772 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
773 break;
774 }
775 default:
776 ShouldNotReachHere();
777 }
778 break;
779 }
780 default:
781 ShouldNotReachHere();
782 }
783 }
784
785 void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
786 LIRItem value(x->argument_at(0), this);
787 value.set_destroys_register();
788
789 LIR_Opr calc_result = rlock_result(x);
790 LIR_Opr result_reg = result_register_for(x->type());
791
792 CallingConvention* cc = NULL;
793
794 if (x->id() == vmIntrinsics::_dpow) {
795 LIRItem value1(x->argument_at(1), this);
796
797 value1.set_destroys_register();
798
799 BasicTypeList signature(2);
800 signature.append(T_DOUBLE);
801 signature.append(T_DOUBLE);
802 cc = frame_map()->c_calling_convention(&signature);
803 value.load_item_force(cc->at(0));
804 value1.load_item_force(cc->at(1));
805 } else {
806 BasicTypeList signature(1);
807 signature.append(T_DOUBLE);
808 cc = frame_map()->c_calling_convention(&signature);
809 value.load_item_force(cc->at(0));
810 }
811
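  // Prefer the platform-specific stub when one has been generated;
  // otherwise fall back to the shared runtime implementation.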
812 switch (x->id()) {
813 case vmIntrinsics::_dexp:
814 if (StubRoutines::dexp() != NULL) {
815 __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
816 } else {
817 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
818 }
819 break;
820 case vmIntrinsics::_dlog:
821 if (StubRoutines::dlog() != NULL) {
822 __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
823 } else {
824 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
825 }
826 break;
827 case vmIntrinsics::_dlog10:
828 if (StubRoutines::dlog10() != NULL) {
829 __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
830 } else {
831 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
832 }
833 break;
834 case vmIntrinsics::_dpow:
835 if (StubRoutines::dpow() != NULL) {
836 __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
837 } else {
838 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
839 }
840 break;
841 case vmIntrinsics::_dsin:
842 if (StubRoutines::dsin() != NULL) {
843 __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
844 } else {
845 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
846 }
847 break;
848 case vmIntrinsics::_dcos:
849 if (StubRoutines::dcos() != NULL) {
850 __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
851 } else {
852 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
853 }
854 break;
855 case vmIntrinsics::_dtan:
856 if (StubRoutines::dtan() != NULL) {
857 __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
858 } else {
859 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
860 }
861 break;
862 default: ShouldNotReachHere();
863 }
864 __ move(result_reg, calc_result);
865 }
866
867
868 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
869 assert(x->number_of_arguments() == 5, "wrong type");
870
871 // Make all state_for calls early since they can emit code
872 CodeEmitInfo* info = state_for(x, x->state());
873
874 LIRItem src(x->argument_at(0), this);
875 LIRItem src_pos(x->argument_at(1), this);
876 LIRItem dst(x->argument_at(2), this);
877 LIRItem dst_pos(x->argument_at(3), this);
878 LIRItem length(x->argument_at(4), this);
879
880 // operands for arraycopy must use fixed registers, otherwise
881 // LinearScan will fail allocation (because arraycopy always needs a
882 // call)
883
884 // The java calling convention will give us enough registers
885 // so that on the stub side the args will be perfect already.
886 // On the other slow/special case side we call C and the arg
887 // positions are not similar enough to pick one as the best.
888 // Also because the java calling convention is a "shifted" version
889 // of the C convention we can process the java args trivially into C
890 // args without worry of overwriting during the xfer
891
892 src.load_item_force (FrameMap::as_oop_opr(j_rarg0));
893 src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
894 dst.load_item_force (FrameMap::as_oop_opr(j_rarg2));
895 dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
896 length.load_item_force (FrameMap::as_opr(j_rarg4));
897
898 LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
899
900 set_no_result(x);
901
902 int flags;
903 ciArrayKlass* expected_type;
904 arraycopy_helper(x, &flags, &expected_type);
905
906 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
907 }
908
909 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
910 assert(UseCRC32Intrinsics, "why are we here?");
911 // Make all state_for calls early since they can emit code
912 LIR_Opr result = rlock_result(x);
913 int flags = 0;
914 switch (x->id()) {
915 case vmIntrinsics::_updateCRC32: {
916 LIRItem crc(x->argument_at(0), this);
917 LIRItem val(x->argument_at(1), this);
918 // val is destroyed by update_crc32
919 val.set_destroys_register();
920 crc.load_item();
921 val.load_item();
922 __ update_crc32(crc.result(), val.result(), result);
923 break;
924 }
925 case vmIntrinsics::_updateBytesCRC32:
926 case vmIntrinsics::_updateByteBufferCRC32: {
927 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
928
929 LIRItem crc(x->argument_at(0), this);
930 LIRItem buf(x->argument_at(1), this);
931 LIRItem off(x->argument_at(2), this);
932 LIRItem len(x->argument_at(3), this);
933 buf.load_item();
934 off.load_nonconstant();
935
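      // Fold a constant offset into the base displacement; a variable
      // index must instead be widened to 64 bits before it can be used
      // in the address computation.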
936 LIR_Opr index = off.result();
937 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
939 index = LIR_OprFact::illegalOpr;
940 offset += off.result()->as_jint();
941 }
942 LIR_Opr base_op = buf.result();
943
944 if (index->is_valid()) {
945 LIR_Opr tmp = new_register(T_LONG);
946 __ convert(Bytecodes::_i2l, index, tmp);
947 index = tmp;
948 }
949
950 if (offset) {
951 LIR_Opr tmp = new_pointer_register();
952 __ add(base_op, LIR_OprFact::intConst(offset), tmp);
953 base_op = tmp;
954 offset = 0;
955 }
956
957 LIR_Address* a = new LIR_Address(base_op,
958 index,
959 offset,
960 T_BYTE);
961 BasicTypeList signature(3);
962 signature.append(T_INT);
963 signature.append(T_ADDRESS);
964 signature.append(T_INT);
965 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
966 const LIR_Opr result_reg = result_register_for(x->type());
967
968 LIR_Opr addr = new_pointer_register();
969 __ leal(LIR_OprFact::address(a), addr);
970
971 crc.load_item_force(cc->at(0));
972 __ move(addr, cc->at(1));
973 len.load_item_force(cc->at(2));
974
975 __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
976 __ move(result_reg, result);
977
978 break;
979 }
980 default: {
981 ShouldNotReachHere();
982 }
983 }
984 }
985
986 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
987 assert(UseCRC32CIntrinsics, "why are we here?");
988 // Make all state_for calls early since they can emit code
989 LIR_Opr result = rlock_result(x);
990 int flags = 0;
991 switch (x->id()) {
992 case vmIntrinsics::_updateBytesCRC32C:
993 case vmIntrinsics::_updateDirectByteBufferCRC32C: {
994 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
995 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
996
997 LIRItem crc(x->argument_at(0), this);
998 LIRItem buf(x->argument_at(1), this);
999 LIRItem off(x->argument_at(2), this);
1000 LIRItem end(x->argument_at(3), this);
1001
1002 buf.load_item();
1003 off.load_nonconstant();
1004 end.load_nonconstant();
1005
1006 // len = end - off
1007 LIR_Opr len = end.result();
1008 LIR_Opr tmpA = new_register(T_INT);
1009 LIR_Opr tmpB = new_register(T_INT);
1010 __ move(end.result(), tmpA);
1011 __ move(off.result(), tmpB);
1012 __ sub(tmpA, tmpB, tmpA);
1013 len = tmpA;
1014
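      // Same addressing setup as in do_update_CRC32: fold a constant
      // offset into the displacement, widen a variable index to 64 bits.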
1015 LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
1017 index = LIR_OprFact::illegalOpr;
1018 offset += off.result()->as_jint();
1019 }
1020 LIR_Opr base_op = buf.result();
1021
1022 if (index->is_valid()) {
1023 LIR_Opr tmp = new_register(T_LONG);
1024 __ convert(Bytecodes::_i2l, index, tmp);
1025 index = tmp;
1026 }
1027
1028 if (offset) {
1029 LIR_Opr tmp = new_pointer_register();
1030 __ add(base_op, LIR_OprFact::intConst(offset), tmp);
1031 base_op = tmp;
1032 offset = 0;
1033 }
1034
1035 LIR_Address* a = new LIR_Address(base_op,
1036 index,
1037 offset,
1038 T_BYTE);
1039 BasicTypeList signature(3);
1040 signature.append(T_INT);
1041 signature.append(T_ADDRESS);
1042 signature.append(T_INT);
1043 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1044 const LIR_Opr result_reg = result_register_for(x->type());
1045
1046 LIR_Opr addr = new_pointer_register();
1047 __ leal(LIR_OprFact::address(a), addr);
1048
1049 crc.load_item_force(cc->at(0));
1050 __ move(addr, cc->at(1));
1051 __ move(len, cc->at(2));
1052
1053 __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
1054 __ move(result_reg, result);
1055
1056 break;
1057 }
1058 default: {
1059 ShouldNotReachHere();
1060 }
1061 }
1062 }
1063
1064 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1065 assert(x->number_of_arguments() == 3, "wrong type");
1066 assert(UseFMA, "Needs FMA instructions support.");
1067 LIRItem value(x->argument_at(0), this);
1068 LIRItem value1(x->argument_at(1), this);
1069 LIRItem value2(x->argument_at(2), this);
1070
1071 value.load_item();
1072 value1.load_item();
1073 value2.load_item();
1074
1075 LIR_Opr calc_input = value.result();
1076 LIR_Opr calc_input1 = value1.result();
1077 LIR_Opr calc_input2 = value2.result();
1078 LIR_Opr calc_result = rlock_result(x);
1079
1080 switch (x->id()) {
1081 case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1082 case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1083 default: ShouldNotReachHere();
1084 }
1085 }
1086
1087 void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
1088 fatal("vectorizedMismatch intrinsic is not implemented on this platform");
1089 }
1090
1091 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
1092 // _i2b, _i2c, _i2s
1093 void LIRGenerator::do_Convert(Convert* x) {
1094 LIRItem value(x->value(), this);
1095 value.load_item();
1096 LIR_Opr input = value.result();
1097 LIR_Opr result = rlock(x);
1098
1099 // arguments of lir_convert
1100 LIR_Opr conv_input = input;
1101 LIR_Opr conv_result = result;
1102
1103 __ convert(x->op(), conv_input, conv_result);
1104
1105 assert(result->is_virtual(), "result must be virtual register");
1106 set_result(x, result);
1107 }
1108
1109 void LIRGenerator::do_NewInstance(NewInstance* x) {
1110 #ifndef PRODUCT
1111 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1112 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1113 }
1114 #endif
1115 CodeEmitInfo* info = state_for(x, x->state());
1116 LIR_Opr reg = result_register_for(x->type());
1117 new_instance(reg, x->klass(), x->is_unresolved(),
1118 FrameMap::r10_oop_opr,
1119 FrameMap::r11_oop_opr,
1120 FrameMap::r4_oop_opr,
1121 LIR_OprFact::illegalOpr,
1122 FrameMap::r3_metadata_opr, info);
1123 LIR_Opr result = rlock_result(x);
1124 __ move(reg, result);
1125 }
1126
1127 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1128 CodeEmitInfo* info = state_for(x, x->state());
1129
1130 LIRItem length(x->length(), this);
1131 length.load_item_force(FrameMap::r19_opr);
1132
1133 LIR_Opr reg = result_register_for(x->type());
1134 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1135 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1136 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1137 LIR_Opr tmp4 = reg;
1138 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1139 LIR_Opr len = length.result();
1140 BasicType elem_type = x->elt_type();
1141
1142 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1143
1144 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1145 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1146
1147 LIR_Opr result = rlock_result(x);
1148 __ move(reg, result);
1149 }
1150
1151 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1152 LIRItem length(x->length(), this);
1153 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1154 // and therefore provide the state before the parameters have been consumed
1155 CodeEmitInfo* patching_info = NULL;
1156 if (!x->klass()->is_loaded() || PatchALot) {
1157 patching_info = state_for(x, x->state_before());
1158 }
1159
1160 CodeEmitInfo* info = state_for(x, x->state());
1161
1162 LIR_Opr reg = result_register_for(x->type());
1163 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1164 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1165 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1166 LIR_Opr tmp4 = reg;
1167 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1168
1169 length.load_item_force(FrameMap::r19_opr);
1170 LIR_Opr len = length.result();
1171
1172 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1173 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1174 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1175 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1176 }
1177 klass2reg_with_patching(klass_reg, obj, patching_info);
1178 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1179
1180 LIR_Opr result = rlock_result(x);
1181 __ move(reg, result);
1182 }
1183
1184
1185 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1186 Values* dims = x->dims();
1187 int i = dims->length();
1188 LIRItemList* items = new LIRItemList(i, i, NULL);
1189 while (i-- > 0) {
1190 LIRItem* size = new LIRItem(dims->at(i), this);
1191 items->at_put(i, size);
1192 }
1193
1194 // Evaluate state_for early since it may emit code.
1195 CodeEmitInfo* patching_info = NULL;
1196 if (!x->klass()->is_loaded() || PatchALot) {
1197 patching_info = state_for(x, x->state_before());
1198
1199 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
1200 // clone all handlers (NOTE: Usually this is handled transparently
1201 // by the CodeEmitInfo cloning logic in CodeStub constructors but
1202 // is done explicitly here because a stub isn't being used).
1203 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
1204 }
1205 CodeEmitInfo* info = state_for(x, x->state());
1206
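  // Store the dimension sizes on the stack as jints; the runtime entry
  // reads them back as a varargs array of 4-byte elements.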
1207 i = dims->length();
1208 while (i-- > 0) {
1209 LIRItem* size = items->at(i);
1210 size->load_item();
1211
1212 store_stack_parameter(size->result(), in_ByteSize(i*4));
1213 }
1214
1215 LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
1216 klass2reg_with_patching(klass_reg, x->klass(), patching_info);
1217
1218 LIR_Opr rank = FrameMap::r19_opr;
1219 __ move(LIR_OprFact::intConst(x->rank()), rank);
1220 LIR_Opr varargs = FrameMap::r2_opr;
1221 __ move(FrameMap::sp_opr, varargs);
1222 LIR_OprList* args = new LIR_OprList(3);
1223 args->append(klass_reg);
1224 args->append(rank);
1225 args->append(varargs);
1226 LIR_Opr reg = result_register_for(x->type());
1227 __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
1228 LIR_OprFact::illegalOpr,
1229 reg, args, info);
1230
1231 LIR_Opr result = rlock_result(x);
1232 __ move(reg, result);
1233 }
1234
1235 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1236 // nothing to do for now
1237 }
1238
1239 void LIRGenerator::do_CheckCast(CheckCast* x) {
1240 LIRItem obj(x->obj(), this);
1241
1242 CodeEmitInfo* patching_info = NULL;
1243 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1244 // must do this before locking the destination register as an oop register,
1245 // and before the obj is loaded (the latter is for deoptimization)
1246 patching_info = state_for(x, x->state_before());
1247 }
1248 obj.load_item();
1249
1250 // info for exceptions
1251 CodeEmitInfo* info_for_exception =
1252 (x->needs_exception_state() ? state_for(x) :
1253 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1254
1255 CodeStub* stub;
1256 if (x->is_incompatible_class_change_check()) {
1257 assert(patching_info == NULL, "can't patch this");
1258 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1259 } else if (x->is_invokespecial_receiver_check()) {
1260 assert(patching_info == NULL, "can't patch this");
1261 stub = new DeoptimizeStub(info_for_exception,
1262 Deoptimization::Reason_class_check,
1263 Deoptimization::Action_none);
1264 } else {
1265 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1266 }
1267 LIR_Opr reg = rlock_result(x);
1268 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1269 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1270 tmp3 = new_register(objectType);
1271 }
1272 __ checkcast(reg, obj.result(), x->klass(),
1273 new_register(objectType), new_register(objectType), tmp3,
1274 x->direct_compare(), info_for_exception, patching_info, stub,
1275 x->profiled_method(), x->profiled_bci());
1276 }
1277
1278 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1279 LIRItem obj(x->obj(), this);
1280
1281 // result and test object may not be in same register
1282 LIR_Opr reg = rlock_result(x);
1283 CodeEmitInfo* patching_info = NULL;
1284 if ((!x->klass()->is_loaded() || PatchALot)) {
1285 // must do this before locking the destination register as an oop register
1286 patching_info = state_for(x, x->state_before());
1287 }
1288 obj.load_item();
1289 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1290 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1291 tmp3 = new_register(objectType);
1292 }
1293 __ instanceof(reg, obj.result(), x->klass(),
1294 new_register(objectType), new_register(objectType), tmp3,
1295 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1296 }
1297
1298 void LIRGenerator::do_If(If* x) {
1299 assert(x->number_of_sux() == 2, "inconsistency");
1300 ValueTag tag = x->x()->type()->tag();
1301 bool is_safepoint = x->is_safepoint();
1302
1303 If::Condition cond = x->cond();
1304
1305 LIRItem xitem(x->x(), this);
1306 LIRItem yitem(x->y(), this);
1307 LIRItem* xin = &xitem;
1308 LIRItem* yin = &yitem;
1309
1310 if (tag == longTag) {
1311 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1312 // mirror for other conditions
1313 if (cond == If::gtr || cond == If::leq) {
1314 cond = Instruction::mirror(cond);
1315 xin = &yitem;
1316 yin = &xitem;
1317 }
1318 xin->set_destroys_register();
1319 }
1320 xin->load_item();
1321
1322 if (tag == longTag) {
1323 if (yin->is_constant()
1324 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
1325 yin->dont_load_item();
1326 } else {
1327 yin->load_item();
1328 }
1329 } else if (tag == intTag) {
1330 if (yin->is_constant()
1331 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
1332 yin->dont_load_item();
1333 } else {
1334 yin->load_item();
1335 }
1336 } else {
1337 yin->load_item();
1338 }
1339
1340 set_no_result(x);
1341
1342 LIR_Opr left = xin->result();
1343 LIR_Opr right = yin->result();
1344
1345 // add safepoint before generating condition code so it can be recomputed
1346 if (x->is_safepoint()) {
1347 // increment backedge counter if needed
1348 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1349 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1350 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1351 }
1352
1353 __ cmp(lir_cond(cond), left, right);
1354 // Generate branch profiling. Profiling code doesn't kill flags.
1355 profile_branch(x, cond);
1356 move_to_phi(x->state());
1357 if (x->x()->type()->is_float_kind()) {
1358 __ branch(lir_cond(cond), x->tsux(), x->usux());
1359 } else {
1360 __ branch(lir_cond(cond), x->tsux());
1361 }
1362 assert(x->default_sux() == x->fsux(), "wrong destination above");
1363 __ jump(x->default_sux());
1364 }
1365
1366 LIR_Opr LIRGenerator::getThreadPointer() {
1367 return FrameMap::as_pointer_opr(rthread);
1368 }
1369
1370 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1371
1372 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1373 CodeEmitInfo* info) {
1374 __ volatile_store_mem_reg(value, address, info);
1375 }
1376
1377 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1378 CodeEmitInfo* info) {
1379 // 8179954: We need to make sure that the code generated for
1380 // volatile accesses forms a sequentially-consistent set of
1381 // operations when combined with STLR and LDAR. Without a leading
1382 // membar it's possible for a simple Dekker test to fail if loads
1383 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1384 // the stores in one method and C1 compiles the loads in another.
1385 if (!CompilerConfig::is_c1_only_no_jvmci()) {
1386 __ membar();
1387 }
1388 __ volatile_load_mem_reg(address, result, info);
1389 }