/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
// Item will be loaded into a byte register; the byte-register constraint is
// Intel-specific, so an ordinary load suffices on AArch64.
void LIRItem::load_byte_item() {
  load_item();
}


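// Use the value directly if it has already been materialized as a LIR
// constant operand; otherwise load it into a register.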
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


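// Fixed result registers: integral, long and oop results come back in r0,
// floating-point results in v0.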
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


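// Only zero and null qualify as storable constants: a store of zero can use
// the zr register directly and needs no materialization.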
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


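// Build a LIR_Address for base + (index << shift) + disp.  Constant indices
// are folded into the displacement; if the accumulated displacement does not
// fit the addressing mode, it is materialized into a register so that the
// final address is either base + index or base + small displacement.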
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += ((intx)index->as_jint()) << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0 && index->is_register()) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, shift), "failed for large_disp: " INTPTR_FORMAT " and shift %d", large_disp, shift);
    return new LIR_Address(base, large_disp, type);
  }
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
}

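// Return the constant as an inline operand when it is encodable as a logical
// immediate; otherwise materialize it into a fresh register.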
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch (addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}

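// Replace a multiply by (2^n + 1) with shift-and-add and a multiply by
// (2^n - 1) with shift-and-subtract, e.g. left * 9 == (left << 3) + left.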
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr scratch = new_register(T_INT);

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  LIR_Opr scratch = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);
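    // Note: the names are deliberately swapped.  "right" holds the dividend
    // (x->x()) and "left" the divisor (x->y()): the divisor is forced into
    // cc->at(1) first and the dividend is then moved into cc->at(0), which
    // is the argument order SharedRuntime::frem/drem expect.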

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem(left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div(left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || !right.is_constant()
          || !Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
  case floatTag:
  case doubleTag:  do_ArithmeticOp_FPU(x);  return;
  case longTag:    do_ArithmeticOp_Long(x); return;
  case intTag:     do_ArithmeticOp_Int(x);  return;
  default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

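    // Java shift distances are masked to 5 bits for ints and 6 bits for
    // longs, hence the & 0x1f / & 0x3f below.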
    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
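  // The CAS sequences above leave 0 in r8 (rscratch1) on success and 1 on
  // failure; xor with 1 converts that into the boolean result expected here.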
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt: {
    assert(x->number_of_arguments() == 1, "wrong type");
    LIRItem value(x->argument_at(0), this);
    value.load_item();
    LIR_Opr dst = rlock_result(x);

    switch (x->id()) {
    case vmIntrinsics::_dsqrt: {
      __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dabs: {
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    break;
  }
  default:
    ShouldNotReachHere();
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

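  // Prefer the architecture-specific stub when one was generated at startup;
  // otherwise fall back to the shared C runtime implementation.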
  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateCRC32: {
    LIRItem crc(x->argument_at(0), this);
    LIRItem val(x->argument_at(1), this);
    // val is destroyed by update_crc32
    val.set_destroys_register();
    crc.load_item();
    val.load_item();
    __ update_crc32(crc.result(), val.result(), result);
    break;
  }
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem len(x->argument_at(3), this);
    buf.load_item();
    off.load_nonconstant();

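    // If the offset is a constant, fold it into the base displacement
    // instead of using an index register.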
    LIR_Opr index = off.result();
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    len.load_item_force(cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem end(x->argument_at(3), this);

    buf.load_item();
    off.load_nonconstant();
    end.load_nonconstant();

    // len = end - off
    LIR_Opr len = end.result();
    LIR_Opr tmpA = new_register(T_INT);
    LIR_Opr tmpB = new_register(T_INT);
    __ move(end.result(), tmpA);
    __ move(off.result(), tmpB);
    __ sub(tmpA, tmpB, tmpA);
    len = tmpA;

    LIR_Opr index = off.result();
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    __ move(len, cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default: ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
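// On AArch64 each of these conversions maps onto a single lir_convert; no
// fixed registers or extra temporaries are needed.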
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r10_oop_opr,
               FrameMap::r11_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r10_oop_opr;
  LIR_Opr tmp2 = FrameMap::r11_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r10_oop_opr;
  LIR_Opr tmp2 = FrameMap::r11_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

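    // Each dimension is passed as a jint, so successive stack slots are
    // 4 bytes apart; the runtime reads them back as a vararg array.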
    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (!CompilerConfig::is_c1_only_no_jvmci()) {
    __ membar();
  }
  __ volatile_load_mem_reg(address, result, info);
}