1 /*
2 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArray.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "compiler/compilerDefinitions.inline.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
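
// The '__' shorthand appends LIR instructions to the current LIR list;
// in ASSERT builds it also records the C++ source location of each
// emission site.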
48
// On x86 the item must be loaded into a byte register; AArch64 has no
// such restriction, so a plain load suffices.
50 void LIRItem::load_byte_item() {
51 load_item();
52 }
53
54
55 void LIRItem::load_nonconstant() {
56 LIR_Opr r = value()->operand();
57 if (r->is_constant()) {
58 _result = r;
59 } else {
60 load_item();
61 }
62 }
63
64 //--------------------------------------------------------------
65 // LIRGenerator
66 //--------------------------------------------------------------
67
68
69 LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
70 LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::r3_opr; }
71 LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
72 LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
73 LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
74 LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
75 LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
76 LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::r0_opr; }
77 LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; }
78
79
80 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
81 LIR_Opr opr;
82 switch (type->tag()) {
83 case intTag: opr = FrameMap::r0_opr; break;
84 case objectTag: opr = FrameMap::r0_oop_opr; break;
85 case longTag: opr = FrameMap::long0_opr; break;
86 case floatTag: opr = FrameMap::fpu0_float_opr; break;
87 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
88
89 case addressTag:
90 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
91 }
92
93 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
94 return opr;
95 }
96
97
98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
99 LIR_Opr reg = new_register(T_INT);
100 set_vreg_flag(reg, LIRGenerator::byte_reg);
101 return reg;
102 }
103
104
105 //--------- loading items into registers --------------------------------
106
107
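// A value can be stored to memory as a constant only if no register is
// needed to materialize it; on AArch64 that means zero or null, which
// can be written using the zero register (wzr/xzr).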
108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
109 if (v->type()->as_IntConstant() != nullptr) {
110 return v->type()->as_IntConstant()->value() == 0L;
111 } else if (v->type()->as_LongConstant() != nullptr) {
112 return v->type()->as_LongConstant()->value() == 0L;
113 } else if (v->type()->as_ObjectConstant() != nullptr) {
114 return v->type()->as_ObjectConstant()->value()->is_null_object();
115 } else {
116 return false;
117 }
118 }
119
120 bool LIRGenerator::can_inline_as_constant(Value v) const {
121 // FIXME: Just a guess
122 if (v->type()->as_IntConstant() != nullptr) {
123 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
124 } else if (v->type()->as_LongConstant() != nullptr) {
125 return v->type()->as_LongConstant()->value() == 0L;
126 } else if (v->type()->as_ObjectConstant() != nullptr) {
127 return v->type()->as_ObjectConstant()->value()->is_null_object();
128 } else {
129 return false;
130 }
131 }
132
133
134 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
135
136
137 LIR_Opr LIRGenerator::safepoint_poll_register() {
138 return LIR_OprFact::illegalOpr;
139 }
140
141
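// Build a LIR_Address for base + (index << shift) + disp. Constant
// indexes are folded into the displacement, and components that cannot
// be encoded in an AArch64 addressing mode are materialized in
// registers, so the result is always base + index or base + displacement.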
142 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
143 int shift, int disp, BasicType type) {
144 assert(base->is_register(), "must be");
145 intx large_disp = disp;
146
147 // accumulate fixed displacements
148 if (index->is_constant()) {
149 LIR_Const *constant = index->as_constant_ptr();
150 if (constant->type() == T_INT) {
151 large_disp += ((intx)index->as_jint()) << shift;
152 } else {
153 assert(constant->type() == T_LONG, "should be");
154 jlong c = index->as_jlong() << shift;
155 if ((jlong)((jint)c) == c) {
156 large_disp += c;
157 index = LIR_OprFact::illegalOpr;
158 } else {
159 LIR_Opr tmp = new_register(T_LONG);
160 __ move(index, tmp);
161 index = tmp;
162 // apply shift and displacement below
163 }
164 }
165 }
166
167 if (index->is_register()) {
168 // apply the shift and accumulate the displacement
169 if (shift > 0) {
170 // Use long register to avoid overflow when shifting large index values left.
171 LIR_Opr tmp = new_register(T_LONG);
172 __ convert(Bytecodes::_i2l, index, tmp);
173 __ shift_left(tmp, shift, tmp);
174 index = tmp;
175 }
176 if (large_disp != 0) {
177 LIR_Opr tmp = new_pointer_register();
178 if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
179 __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
180 index = tmp;
181 } else {
182 __ move(LIR_OprFact::intptrConst(large_disp), tmp);
183 __ add(tmp, index, tmp);
184 index = tmp;
185 }
186 large_disp = 0;
187 }
188 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // The index is not a register and the displacement cannot be encoded
    // in the addressing mode, so materialize the displacement in a
    // register and use it as the index.
190 index = new_pointer_register();
191 __ move(LIR_OprFact::intptrConst(large_disp), index);
192 large_disp = 0;
193 }
194
195 // at this point we either have base + index or base + displacement
196 if (large_disp == 0 && index->is_register()) {
197 return new LIR_Address(base, index, type);
198 } else {
199 assert(Address::offset_ok_for_immed(large_disp, shift), "failed for large_disp: " INTPTR_FORMAT " and shift %d", large_disp, shift);
200 return new LIR_Address(base, large_disp, type);
201 }
202 }
203
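// Element address = array base + header offset + (index << log2(element size)).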
204 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
205 BasicType type) {
206 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
207 int elem_size = type2aelembytes(type);
208 int shift = exact_log2(elem_size);
209 return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
210 }
211
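// Return the constant as an immediate operand if it is encodable as an
// AArch64 logical immediate, otherwise materialize it in a register.
// The check is conservative because the eventual use site is not known
// here (see the comment below).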
212 LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
213 LIR_Opr r;
214 if (type == T_LONG) {
215 r = LIR_OprFact::longConst(x);
216 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
217 LIR_Opr tmp = new_register(type);
218 __ move(r, tmp);
219 return tmp;
220 }
221 } else if (type == T_INT) {
222 r = LIR_OprFact::intConst(checked_cast<jint>(x));
223 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty. We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
227 LIR_Opr tmp = new_register(type);
228 __ move(r, tmp);
229 return tmp;
230 }
231 } else {
232 ShouldNotReachHere();
233 }
234 return r;
235 }
236
237
238
239 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
240 LIR_Opr pointer = new_pointer_register();
241 __ move(LIR_OprFact::intptrConst(counter), pointer);
242 LIR_Address* addr = new LIR_Address(pointer, type);
243 increment_counter(addr, step);
244 }
245
246
247 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
248 LIR_Opr imm;
249 switch(addr->type()) {
250 case T_INT:
251 imm = LIR_OprFact::intConst(step);
252 break;
253 case T_LONG:
254 imm = LIR_OprFact::longConst(step);
255 break;
256 default:
257 ShouldNotReachHere();
258 }
259 LIR_Opr reg = new_register(addr->type());
260 __ load(addr, reg);
261 __ add(reg, imm, reg);
262 __ store(reg, addr);
263 }
264
265 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
266 LIR_Opr reg = new_register(T_INT);
267 __ load(generate_address(base, disp, T_INT), reg, info);
268 __ cmp(condition, reg, LIR_OprFact::intConst(c));
269 }
270
271 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
272 LIR_Opr reg1 = new_register(T_INT);
273 __ load(generate_address(base, disp, type), reg1, info);
274 __ cmp(condition, reg, reg1);
275 }
276
277
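// Strength-reduce a multiply by a constant of the form 2^n + 1, 2^n - 1
// or -1: e.g. left * 9 becomes (left << 3) + left, and left * 7 becomes
// (left << 3) - left.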
278 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
279 juint u_value = (juint)c;
280 if (is_power_of_2(u_value - 1)) {
281 __ shift_left(left, exact_log2(u_value - 1), tmp);
282 __ add(tmp, left, result);
283 return true;
284 } else if (is_power_of_2(u_value + 1)) {
285 __ shift_left(left, exact_log2(u_value + 1), tmp);
286 __ sub(tmp, left, result);
287 return true;
288 } else if (c == -1) {
289 __ negate(left, result);
290 return true;
291 }
292 return false;
293 }
294
295 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
296 BasicType type = item->type();
297 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
298 }
299
300 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
301 LIR_Opr tmp1 = new_register(objectType);
302 LIR_Opr tmp2 = new_register(objectType);
303 LIR_Opr tmp3 = new_register(objectType);
304 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
305 }
306
307 //----------------------------------------------------------------------
308 // visitor functions
309 //----------------------------------------------------------------------
310
311 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
312 assert(x->is_pinned(),"");
313 LIRItem obj(x->obj(), this);
314 obj.load_item();
315
316 set_no_result(x);
317
318 // "lock" stores the address of the monitor stack slot, so this is not an oop
319 LIR_Opr lock = new_register(T_INT);
320 LIR_Opr scratch = new_register(T_INT);
321
322 CodeEmitInfo* info_for_exception = nullptr;
323 if (x->needs_null_check()) {
324 info_for_exception = state_for(x);
325 }
326 // this CodeEmitInfo must not have the xhandlers because here the
327 // object is already locked (xhandlers expect object to be unlocked)
328 CodeEmitInfo* info = state_for(x, x->state(), true);
329 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
330 x->monitor_no(), info_for_exception, info);
331 }
332
333
334 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
335 assert(x->is_pinned(),"");
336
337 LIRItem obj(x->obj(), this);
338 obj.dont_load_item();
339
340 LIR_Opr lock = new_register(T_INT);
341 LIR_Opr obj_temp = new_register(T_INT);
342 LIR_Opr scratch = new_register(T_INT);
343 set_no_result(x);
344 monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
345 }
346
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}
355
356 // for _fadd, _fmul, _fsub, _fdiv, _frem
357 // _dadd, _dmul, _dsub, _ddiv, _drem
358 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
359
360 if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
361 // float remainder is implemented as a direct call into the runtime
362 LIRItem right(x->x(), this);
363 LIRItem left(x->y(), this);
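    // Note: 'right' wraps x->x() and 'left' wraps x->y(); left is forced
    // into cc->at(1) and right is moved into cc->at(0) below, so the
    // dividend still becomes the first C argument.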
364
365 BasicTypeList signature(2);
366 if (x->op() == Bytecodes::_frem) {
367 signature.append(T_FLOAT);
368 signature.append(T_FLOAT);
369 } else {
370 signature.append(T_DOUBLE);
371 signature.append(T_DOUBLE);
372 }
373 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
374
375 const LIR_Opr result_reg = result_register_for(x->type());
376 left.load_item_force(cc->at(1));
377 right.load_item();
378
379 __ move(right.result(), cc->at(0));
380
381 address entry;
382 if (x->op() == Bytecodes::_frem) {
383 entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
384 } else {
385 entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
386 }
387
388 LIR_Opr result = rlock_result(x);
389 __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
390 __ move(result_reg, result);
391
392 return;
393 }
394
395 LIRItem left(x->x(), this);
396 LIRItem right(x->y(), this);
399
400 // Always load right hand side.
401 right.load_item();
402
  if (!left.is_register()) {
    left.load_item();
  }
405
406 LIR_Opr reg = rlock(x);
407
408 arithmetic_op_fpu(x->op(), reg, left.result(), right.result());
409
410 set_result(x, reg);
411 }
412
413 // for _ladd, _lmul, _lsub, _ldiv, _lrem
414 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
415
  // Missing: test whether the instruction is commutative and whether we
  // should swap the operands.
417 LIRItem left(x->x(), this);
418 LIRItem right(x->y(), this);
419
420 if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
421
422 left.load_item();
423 bool need_zero_check = true;
424 if (right.is_constant()) {
425 jlong c = right.get_jlong_constant();
426 // no need to do div-by-zero check if the divisor is a non-zero constant
427 if (c != 0) need_zero_check = false;
428 // do not load right if the divisor is a power-of-2 constant
429 if (c > 0 && is_power_of_2(c)) {
430 right.dont_load_item();
431 } else {
432 right.load_item();
433 }
434 } else {
435 right.load_item();
436 }
437 if (need_zero_check) {
438 CodeEmitInfo* info = state_for(x);
439 __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
440 __ branch(lir_cond_equal, new DivByZeroStub(info));
441 }
442
443 rlock_result(x);
444 switch (x->op()) {
445 case Bytecodes::_lrem:
446 __ rem (left.result(), right.result(), x->operand());
447 break;
448 case Bytecodes::_ldiv:
449 __ div (left.result(), right.result(), x->operand());
450 break;
451 default:
452 ShouldNotReachHere();
453 break;
454 }

  } else {
458 assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
459 "expect lmul, ladd or lsub");
460 // add, sub, mul
461 left.load_item();
462 if (! right.is_register()) {
463 if (x->op() == Bytecodes::_lmul
464 || ! right.is_constant()
465 || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
466 right.load_item();
467 } else { // add, sub
468 assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
469 // don't load constants to save register
470 right.load_nonconstant();
471 }
472 }
473 rlock_result(x);
474 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
475 }
476 }
477
478 // for: _iadd, _imul, _isub, _idiv, _irem
479 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
480
481 // Test if instr is commutative and if we should swap
482 LIRItem left(x->x(), this);
483 LIRItem right(x->y(), this);
484 LIRItem* left_arg = &left;
485 LIRItem* right_arg = &right;
486 if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // Swap them if left is really a stack value (or cached) and right is
    // really a register (not cached).
488 left_arg = &right;
489 right_arg = &left;
490 }
491
492 left_arg->load_item();
493
494 // do not need to load right, as we can handle stack and constants
495 if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
496
497 rlock_result(x);
498 bool need_zero_check = true;
499 if (right.is_constant()) {
500 jint c = right.get_jint_constant();
501 // no need to do div-by-zero check if the divisor is a non-zero constant
502 if (c != 0) need_zero_check = false;
503 // do not load right if the divisor is a power-of-2 constant
504 if (c > 0 && is_power_of_2(c)) {
505 right_arg->dont_load_item();
506 } else {
507 right_arg->load_item();
508 }
509 } else {
510 right_arg->load_item();
511 }
512 if (need_zero_check) {
513 CodeEmitInfo* info = state_for(x);
514 __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
515 __ branch(lir_cond_equal, new DivByZeroStub(info));
516 }
517
518 LIR_Opr ill = LIR_OprFact::illegalOpr;
519 if (x->op() == Bytecodes::_irem) {
520 __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
521 } else if (x->op() == Bytecodes::_idiv) {
522 __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
523 }
524
525 } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
526 if (right.is_constant()
527 && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
528 right.load_nonconstant();
529 } else {
530 right.load_item();
531 }
532 rlock_result(x);
533 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
534 } else {
535 assert (x->op() == Bytecodes::_imul, "expect imul");
536 if (right.is_constant()) {
537 jint c = right.get_jint_constant();
538 if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
539 right_arg->dont_load_item();
540 } else {
541 // Cannot use constant op.
542 right_arg->load_item();
543 }
544 } else {
545 right.load_item();
546 }
547 rlock_result(x);
548 arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
549 }
550 }
551
552 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // When an operand with use count 1 is the left operand, it is likely
  // that no move is necessary for the 2-operand LIR form.
555 if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
556 x->swap_operands();
557 }
558
559 ValueTag tag = x->type()->tag();
560 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
561 switch (tag) {
562 case floatTag:
563 case doubleTag: do_ArithmeticOp_FPU(x); return;
564 case longTag: do_ArithmeticOp_Long(x); return;
565 case intTag: do_ArithmeticOp_Int(x); return;
566 default: ShouldNotReachHere(); return;
567 }
568 }
569
570 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
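// Shift distances are masked to 5 bits (int) or 6 bits (long), matching
// the JLS and the behavior of the AArch64 variable-shift instructions.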
571 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
572
573 LIRItem left(x->x(), this);
574 LIRItem right(x->y(), this);
575
576 left.load_item();
577
578 rlock_result(x);
579 if (right.is_constant()) {
580 right.dont_load_item();
581
582 switch (x->op()) {
583 case Bytecodes::_ishl: {
584 int c = right.get_jint_constant() & 0x1f;
585 __ shift_left(left.result(), c, x->operand());
586 break;
587 }
588 case Bytecodes::_ishr: {
589 int c = right.get_jint_constant() & 0x1f;
590 __ shift_right(left.result(), c, x->operand());
591 break;
592 }
593 case Bytecodes::_iushr: {
594 int c = right.get_jint_constant() & 0x1f;
595 __ unsigned_shift_right(left.result(), c, x->operand());
596 break;
597 }
598 case Bytecodes::_lshl: {
599 int c = right.get_jint_constant() & 0x3f;
600 __ shift_left(left.result(), c, x->operand());
601 break;
602 }
603 case Bytecodes::_lshr: {
604 int c = right.get_jint_constant() & 0x3f;
605 __ shift_right(left.result(), c, x->operand());
606 break;
607 }
608 case Bytecodes::_lushr: {
609 int c = right.get_jint_constant() & 0x3f;
610 __ unsigned_shift_right(left.result(), c, x->operand());
611 break;
612 }
613 default:
614 ShouldNotReachHere();
615 }
616 } else {
617 right.load_item();
618 LIR_Opr tmp = new_register(T_INT);
619 switch (x->op()) {
620 case Bytecodes::_ishl: {
621 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
622 __ shift_left(left.result(), tmp, x->operand(), tmp);
623 break;
624 }
625 case Bytecodes::_ishr: {
626 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
627 __ shift_right(left.result(), tmp, x->operand(), tmp);
628 break;
629 }
630 case Bytecodes::_iushr: {
631 __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
632 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
633 break;
634 }
635 case Bytecodes::_lshl: {
636 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
637 __ shift_left(left.result(), tmp, x->operand(), tmp);
638 break;
639 }
640 case Bytecodes::_lshr: {
641 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
642 __ shift_right(left.result(), tmp, x->operand(), tmp);
643 break;
644 }
645 case Bytecodes::_lushr: {
646 __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
647 __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
648 break;
649 }
650 default:
651 ShouldNotReachHere();
652 }
653 }
654 }
655
656 // _iand, _land, _ior, _lor, _ixor, _lxor
657 void LIRGenerator::do_LogicOp(LogicOp* x) {
658
659 LIRItem left(x->x(), this);
660 LIRItem right(x->y(), this);
661
662 left.load_item();
663
664 rlock_result(x);
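  // AArch64 logical instructions accept only "bitmask immediate"
  // encodings, so keep the right operand as a constant only when it is
  // encodable that way.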
665 if (right.is_constant()
666 && ((right.type()->tag() == intTag
667 && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
668 || (right.type()->tag() == longTag
669 && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
670 right.dont_load_item();
671 } else {
672 right.load_item();
673 }
674 switch (x->op()) {
675 case Bytecodes::_iand:
676 case Bytecodes::_land:
677 __ logical_and(left.result(), right.result(), x->operand()); break;
678 case Bytecodes::_ior:
679 case Bytecodes::_lor:
680 __ logical_or (left.result(), right.result(), x->operand()); break;
681 case Bytecodes::_ixor:
682 case Bytecodes::_lxor:
683 __ logical_xor(left.result(), right.result(), x->operand()); break;
684 default: Unimplemented();
685 }
686 }
687
688 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
689 void LIRGenerator::do_CompareOp(CompareOp* x) {
690 LIRItem left(x->x(), this);
691 LIRItem right(x->y(), this);
692 ValueTag tag = x->x()->type()->tag();
693 if (tag == longTag) {
694 left.set_destroys_register();
695 }
696 left.load_item();
697 right.load_item();
698 LIR_Opr reg = rlock_result(x);
699
700 if (x->x()->type()->is_float_kind()) {
701 Bytecodes::Code code = x->op();
702 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
703 } else if (x->x()->type()->tag() == longTag) {
704 __ lcmp2int(left.result(), right.result(), reg);
705 } else {
706 Unimplemented();
707 }
708 }
709
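// The generated compare-and-swap sequence leaves its status in r8; the
// logical_xor with 1 below converts that status into the boolean result
// expected by the caller.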
710 LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
711 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
712 new_value.load_item();
713 cmp_value.load_item();
714 LIR_Opr result = new_register(T_INT);
715 if (is_reference_type(type)) {
716 __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
717 } else if (type == T_INT) {
718 __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
719 } else if (type == T_LONG) {
720 __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
725 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
726 return result;
727 }
728
729 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
730 bool is_oop = is_reference_type(type);
731 LIR_Opr result = new_register(type);
732 value.load_item();
733 assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
734 LIR_Opr tmp = new_register(T_INT);
735 __ xchg(addr, value.result(), result, tmp);
736 return result;
737 }
738
739 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
740 LIR_Opr result = new_register(type);
741 value.load_item();
742 assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
743 LIR_Opr tmp = new_register(T_INT);
744 __ xadd(addr, value.result(), result, tmp);
745 return result;
746 }
747
748 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
749 assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
750 if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
751 x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
752 x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
753 x->id() == vmIntrinsics::_dlog10) {
754 do_LibmIntrinsic(x);
755 return;
756 }
757 switch (x->id()) {
758 case vmIntrinsics::_dabs:
759 case vmIntrinsics::_dsqrt:
760 case vmIntrinsics::_dsqrt_strict:
761 case vmIntrinsics::_floatToFloat16:
762 case vmIntrinsics::_float16ToFloat: {
763 assert(x->number_of_arguments() == 1, "wrong type");
764 LIRItem value(x->argument_at(0), this);
765 value.load_item();
766 LIR_Opr src = value.result();
767 LIR_Opr dst = rlock_result(x);
768
769 switch (x->id()) {
770 case vmIntrinsics::_dsqrt:
771 case vmIntrinsics::_dsqrt_strict: {
772 __ sqrt(src, dst, LIR_OprFact::illegalOpr);
773 break;
774 }
775 case vmIntrinsics::_dabs: {
776 __ abs(src, dst, LIR_OprFact::illegalOpr);
777 break;
778 }
779 case vmIntrinsics::_floatToFloat16: {
780 LIR_Opr tmp = new_register(T_FLOAT);
781 __ f2hf(src, dst, tmp);
782 break;
783 }
784 case vmIntrinsics::_float16ToFloat: {
785 LIR_Opr tmp = new_register(T_FLOAT);
786 __ hf2f(src, dst, tmp);
787 break;
788 }
789 default:
790 ShouldNotReachHere();
791 }
792 break;
793 }
794 default:
795 ShouldNotReachHere();
796 }
797 }
798
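// Dispatch a libm-style math intrinsic: prefer the generated stub
// routine when one is available, otherwise fall back to the shared
// runtime C implementation.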
799 void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
800 LIRItem value(x->argument_at(0), this);
801 value.set_destroys_register();
802
803 LIR_Opr calc_result = rlock_result(x);
804 LIR_Opr result_reg = result_register_for(x->type());
805
806 CallingConvention* cc = nullptr;
807
808 if (x->id() == vmIntrinsics::_dpow) {
809 LIRItem value1(x->argument_at(1), this);
810
811 value1.set_destroys_register();
812
813 BasicTypeList signature(2);
814 signature.append(T_DOUBLE);
815 signature.append(T_DOUBLE);
816 cc = frame_map()->c_calling_convention(&signature);
817 value.load_item_force(cc->at(0));
818 value1.load_item_force(cc->at(1));
819 } else {
820 BasicTypeList signature(1);
821 signature.append(T_DOUBLE);
822 cc = frame_map()->c_calling_convention(&signature);
823 value.load_item_force(cc->at(0));
824 }
825
826 switch (x->id()) {
827 case vmIntrinsics::_dexp:
828 if (StubRoutines::dexp() != nullptr) {
829 __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
830 } else {
831 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
832 }
833 break;
834 case vmIntrinsics::_dlog:
835 // Math.log intrinsic is not implemented on AArch64 (see JDK-8210858),
836 // but we can still call the shared runtime.
837 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
838 break;
839 case vmIntrinsics::_dlog10:
840 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
841 break;
842 case vmIntrinsics::_dpow:
843 if (StubRoutines::dpow() != nullptr) {
844 __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
845 } else {
846 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
847 }
848 break;
849 case vmIntrinsics::_dsin:
850 if (StubRoutines::dsin() != nullptr) {
851 __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
852 } else {
853 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
854 }
855 break;
856 case vmIntrinsics::_dcos:
857 if (StubRoutines::dcos() != nullptr) {
858 __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
859 } else {
860 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
861 }
862 break;
863 case vmIntrinsics::_dtan:
864 if (StubRoutines::dtan() != nullptr) {
865 __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
866 } else {
867 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
868 }
869 break;
870 default: ShouldNotReachHere();
871 }
872 __ move(result_reg, calc_result);
873 }
874
875
876 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
877 assert(x->number_of_arguments() == 5, "wrong type");
878
879 // Make all state_for calls early since they can emit code
880 CodeEmitInfo* info = nullptr;
881 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
882 info = state_for(x, x->state_before());
883 info->set_force_reexecute();
884 } else {
885 info = state_for(x, x->state());
886 }
887
888 LIRItem src(x->argument_at(0), this);
889 LIRItem src_pos(x->argument_at(1), this);
890 LIRItem dst(x->argument_at(2), this);
891 LIRItem dst_pos(x->argument_at(3), this);
892 LIRItem length(x->argument_at(4), this);
893
894 // operands for arraycopy must use fixed registers, otherwise
895 // LinearScan will fail allocation (because arraycopy always needs a
896 // call)
897
898 // The java calling convention will give us enough registers
899 // so that on the stub side the args will be perfect already.
900 // On the other slow/special case side we call C and the arg
901 // positions are not similar enough to pick one as the best.
902 // Also because the java calling convention is a "shifted" version
903 // of the C convention we can process the java args trivially into C
904 // args without worry of overwriting during the xfer
905
906 src.load_item_force (FrameMap::as_oop_opr(j_rarg0));
907 src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
908 dst.load_item_force (FrameMap::as_oop_opr(j_rarg2));
909 dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
910 length.load_item_force (FrameMap::as_opr(j_rarg4));
911
912 LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
913
914 set_no_result(x);
915
916 int flags;
917 ciArrayKlass* expected_type;
918 arraycopy_helper(x, &flags, &expected_type);
919 if (x->check_flag(Instruction::OmitChecksFlag)) {
920 flags = 0;
921 }
922
923 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
924 }
925
926 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
927 assert(UseCRC32Intrinsics, "why are we here?");
928 // Make all state_for calls early since they can emit code
929 LIR_Opr result = rlock_result(x);
930 switch (x->id()) {
931 case vmIntrinsics::_updateCRC32: {
932 LIRItem crc(x->argument_at(0), this);
933 LIRItem val(x->argument_at(1), this);
934 // val is destroyed by update_crc32
935 val.set_destroys_register();
936 crc.load_item();
937 val.load_item();
938 __ update_crc32(crc.result(), val.result(), result);
939 break;
940 }
941 case vmIntrinsics::_updateBytesCRC32:
942 case vmIntrinsics::_updateByteBufferCRC32: {
943 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
944
945 LIRItem crc(x->argument_at(0), this);
946 LIRItem buf(x->argument_at(1), this);
947 LIRItem off(x->argument_at(2), this);
948 LIRItem len(x->argument_at(3), this);
949 buf.load_item();
950 off.load_nonconstant();
951
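      // If the offset is a constant, fold it into the base displacement
      // and drop the index operand.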
952 LIR_Opr index = off.result();
953 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
954 if (off.result()->is_constant()) {
955 index = LIR_OprFact::illegalOpr;
956 offset += off.result()->as_jint();
957 }
958 LIR_Opr base_op = buf.result();
959
960 if (index->is_valid()) {
961 LIR_Opr tmp = new_register(T_LONG);
962 __ convert(Bytecodes::_i2l, index, tmp);
963 index = tmp;
964 }
965
966 if (offset) {
967 LIR_Opr tmp = new_pointer_register();
968 __ add(base_op, LIR_OprFact::intConst(offset), tmp);
969 base_op = tmp;
970 offset = 0;
971 }
972
973 LIR_Address* a = new LIR_Address(base_op,
974 index,
975 offset,
976 T_BYTE);
977 BasicTypeList signature(3);
978 signature.append(T_INT);
979 signature.append(T_ADDRESS);
980 signature.append(T_INT);
981 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
982 const LIR_Opr result_reg = result_register_for(x->type());
983
984 LIR_Opr addr = new_register(T_ADDRESS);
985 __ leal(LIR_OprFact::address(a), addr);
986
987 crc.load_item_force(cc->at(0));
988 __ move(addr, cc->at(1));
989 len.load_item_force(cc->at(2));
990
991 __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
992 __ move(result_reg, result);
993
994 break;
995 }
996 default: {
997 ShouldNotReachHere();
998 }
999 }
1000 }
1001
1002 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
1003 assert(UseCRC32CIntrinsics, "why are we here?");
1004 // Make all state_for calls early since they can emit code
1005 LIR_Opr result = rlock_result(x);
1006 switch (x->id()) {
1007 case vmIntrinsics::_updateBytesCRC32C:
1008 case vmIntrinsics::_updateDirectByteBufferCRC32C: {
1009 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
1010 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
1011
1012 LIRItem crc(x->argument_at(0), this);
1013 LIRItem buf(x->argument_at(1), this);
1014 LIRItem off(x->argument_at(2), this);
1015 LIRItem end(x->argument_at(3), this);
1016
1017 buf.load_item();
1018 off.load_nonconstant();
1019 end.load_nonconstant();
1020
1021 // len = end - off
1022 LIR_Opr len = end.result();
1023 LIR_Opr tmpA = new_register(T_INT);
1024 LIR_Opr tmpB = new_register(T_INT);
1025 __ move(end.result(), tmpA);
1026 __ move(off.result(), tmpB);
1027 __ sub(tmpA, tmpB, tmpA);
1028 len = tmpA;
1029
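      // If the offset is a constant, fold it into the base displacement
      // and drop the index operand.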
1030 LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
1032 index = LIR_OprFact::illegalOpr;
1033 offset += off.result()->as_jint();
1034 }
1035 LIR_Opr base_op = buf.result();
1036
1037 if (index->is_valid()) {
1038 LIR_Opr tmp = new_register(T_LONG);
1039 __ convert(Bytecodes::_i2l, index, tmp);
1040 index = tmp;
1041 }
1042
1043 if (offset) {
1044 LIR_Opr tmp = new_pointer_register();
1045 __ add(base_op, LIR_OprFact::intConst(offset), tmp);
1046 base_op = tmp;
1047 offset = 0;
1048 }
1049
1050 LIR_Address* a = new LIR_Address(base_op,
1051 index,
1052 offset,
1053 T_BYTE);
1054 BasicTypeList signature(3);
1055 signature.append(T_INT);
1056 signature.append(T_ADDRESS);
1057 signature.append(T_INT);
1058 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1059 const LIR_Opr result_reg = result_register_for(x->type());
1060
1061 LIR_Opr addr = new_register(T_ADDRESS);
1062 __ leal(LIR_OprFact::address(a), addr);
1063
1064 crc.load_item_force(cc->at(0));
1065 __ move(addr, cc->at(1));
1066 __ move(len, cc->at(2));
1067
1068 __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
1069 __ move(result_reg, result);
1070
1071 break;
1072 }
1073 default: {
1074 ShouldNotReachHere();
1075 }
1076 }
1077 }
1078
1079 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1080 assert(x->number_of_arguments() == 3, "wrong type");
1081 assert(UseFMA, "Needs FMA instructions support.");
1082 LIRItem value(x->argument_at(0), this);
1083 LIRItem value1(x->argument_at(1), this);
1084 LIRItem value2(x->argument_at(2), this);
1085
1086 value.load_item();
1087 value1.load_item();
1088 value2.load_item();
1089
1090 LIR_Opr calc_input = value.result();
1091 LIR_Opr calc_input1 = value1.result();
1092 LIR_Opr calc_input2 = value2.result();
1093 LIR_Opr calc_result = rlock_result(x);
1094
1095 switch (x->id()) {
1096 case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1097 case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1098 default: ShouldNotReachHere();
1099 }
1100 }
1101
1102 void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
1103 fatal("vectorizedMismatch intrinsic is not implemented on this platform");
1104 }
1105
1106 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
1107 // _i2b, _i2c, _i2s
1108 void LIRGenerator::do_Convert(Convert* x) {
1109 LIRItem value(x->value(), this);
1110 value.load_item();
1111 LIR_Opr input = value.result();
1112 LIR_Opr result = rlock(x);
1113
1114 // arguments of lir_convert
1115 LIR_Opr conv_input = input;
1116 LIR_Opr conv_result = result;
1117
1118 __ convert(x->op(), conv_input, conv_result);
1119
1120 assert(result->is_virtual(), "result must be virtual register");
1121 set_result(x, result);
1122 }
1123
1124 void LIRGenerator::do_NewInstance(NewInstance* x) {
1125 #ifndef PRODUCT
1126 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1127 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1128 }
1129 #endif
1130 CodeEmitInfo* info = state_for(x, x->state());
1131 LIR_Opr reg = result_register_for(x->type());
1132 new_instance(reg, x->klass(), x->is_unresolved(),
1133 FrameMap::r10_oop_opr,
1134 FrameMap::r11_oop_opr,
1135 FrameMap::r4_oop_opr,
1136 LIR_OprFact::illegalOpr,
1137 FrameMap::r3_metadata_opr, info);
1138 LIR_Opr result = rlock_result(x);
1139 __ move(reg, result);
1140 }
1141
1142 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1143 CodeEmitInfo* info = nullptr;
1144 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1145 info = state_for(x, x->state_before());
1146 info->set_force_reexecute();
1147 } else {
1148 info = state_for(x, x->state());
1149 }
1150
1151 LIRItem length(x->length(), this);
1152 length.load_item_force(FrameMap::r19_opr);
1153
1154 LIR_Opr reg = result_register_for(x->type());
1155 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1156 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1157 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1158 LIR_Opr tmp4 = reg;
1159 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1160 LIR_Opr len = length.result();
1161 BasicType elem_type = x->elt_type();
1162
1163 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1164
1165 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1166 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());
1167
1168 LIR_Opr result = rlock_result(x);
1169 __ move(reg, result);
1170 }
1171
1172 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1173 LIRItem length(x->length(), this);
1174 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1175 // and therefore provide the state before the parameters have been consumed
1176 CodeEmitInfo* patching_info = nullptr;
1177 if (!x->klass()->is_loaded() || PatchALot) {
1178 patching_info = state_for(x, x->state_before());
1179 }
1180
1181 CodeEmitInfo* info = state_for(x, x->state());
1182
1183 LIR_Opr reg = result_register_for(x->type());
1184 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1185 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1186 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1187 LIR_Opr tmp4 = reg;
1188 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1189
1190 length.load_item_force(FrameMap::r19_opr);
1191 LIR_Opr len = length.result();
1192
1193 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1194 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1195 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1196 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1197 }
1198 klass2reg_with_patching(klass_reg, obj, patching_info);
1199 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1200
1201 LIR_Opr result = rlock_result(x);
1202 __ move(reg, result);
1203 }
1204
1205
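// The c1_new_multi_array stub expects the klass, the rank and a pointer
// to an array of dimension sizes; the sizes are stored on the stack and
// sp is passed as the varargs pointer in r2.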
1206 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1207 Values* dims = x->dims();
1208 int i = dims->length();
1209 LIRItemList* items = new LIRItemList(i, i, nullptr);
1210 while (i-- > 0) {
1211 LIRItem* size = new LIRItem(dims->at(i), this);
1212 items->at_put(i, size);
1213 }
1214
1215 // Evaluate state_for early since it may emit code.
1216 CodeEmitInfo* patching_info = nullptr;
1217 if (!x->klass()->is_loaded() || PatchALot) {
1218 patching_info = state_for(x, x->state_before());
1219
1220 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
1221 // clone all handlers (NOTE: Usually this is handled transparently
1222 // by the CodeEmitInfo cloning logic in CodeStub constructors but
1223 // is done explicitly here because a stub isn't being used).
1224 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
1225 }
1226 CodeEmitInfo* info = state_for(x, x->state());
1227
1228 i = dims->length();
1229 while (i-- > 0) {
1230 LIRItem* size = items->at(i);
1231 size->load_item();
1232
1233 store_stack_parameter(size->result(), in_ByteSize(i*4));
1234 }
1235
1236 LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
1237 klass2reg_with_patching(klass_reg, x->klass(), patching_info);
1238
1239 LIR_Opr rank = FrameMap::r19_opr;
1240 __ move(LIR_OprFact::intConst(x->rank()), rank);
1241 LIR_Opr varargs = FrameMap::r2_opr;
1242 __ move(FrameMap::sp_opr, varargs);
1243 LIR_OprList* args = new LIR_OprList(3);
1244 args->append(klass_reg);
1245 args->append(rank);
1246 args->append(varargs);
1247 LIR_Opr reg = result_register_for(x->type());
1248 __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id),
1249 LIR_OprFact::illegalOpr,
1250 reg, args, info);
1251
1252 LIR_Opr result = rlock_result(x);
1253 __ move(reg, result);
1254 }
1255
1256 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1257 // nothing to do for now
1258 }
1259
1260 void LIRGenerator::do_CheckCast(CheckCast* x) {
1261 LIRItem obj(x->obj(), this);
1262
1263 CodeEmitInfo* patching_info = nullptr;
1264 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1265 // must do this before locking the destination register as an oop register,
1266 // and before the obj is loaded (the latter is for deoptimization)
1267 patching_info = state_for(x, x->state_before());
1268 }
1269 obj.load_item();
1270
1271 // info for exceptions
1272 CodeEmitInfo* info_for_exception =
1273 (x->needs_exception_state() ? state_for(x) :
1274 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1275
1276 CodeStub* stub;
1277 if (x->is_incompatible_class_change_check()) {
1278 assert(patching_info == nullptr, "can't patch this");
1279 stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1280 } else if (x->is_invokespecial_receiver_check()) {
1281 assert(patching_info == nullptr, "can't patch this");
1282 stub = new DeoptimizeStub(info_for_exception,
1283 Deoptimization::Reason_class_check,
1284 Deoptimization::Action_none);
1285 } else {
1286 stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception);
1287 }
1288 LIR_Opr reg = rlock_result(x);
1289 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1290 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1291 tmp3 = new_register(objectType);
1292 }
1293 __ checkcast(reg, obj.result(), x->klass(),
1294 new_register(objectType), new_register(objectType), tmp3,
1295 x->direct_compare(), info_for_exception, patching_info, stub,
1296 x->profiled_method(), x->profiled_bci());
1297 }
1298
1299 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1300 LIRItem obj(x->obj(), this);
1301
1302 // result and test object may not be in same register
1303 LIR_Opr reg = rlock_result(x);
1304 CodeEmitInfo* patching_info = nullptr;
1305 if ((!x->klass()->is_loaded() || PatchALot)) {
1306 // must do this before locking the destination register as an oop register
1307 patching_info = state_for(x, x->state_before());
1308 }
1309 obj.load_item();
1310 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1311 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1312 tmp3 = new_register(objectType);
1313 }
1314 __ instanceof(reg, obj.result(), x->klass(),
1315 new_register(objectType), new_register(objectType), tmp3,
1316 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1317 }
1318
1319 // Intrinsic for Class::isInstance
1320 address LIRGenerator::isInstance_entry() {
1321 return Runtime1::entry_for(StubId::c1_is_instance_of_id);
1322 }
1323
1324 void LIRGenerator::do_If(If* x) {
1325 assert(x->number_of_sux() == 2, "inconsistency");
1326 ValueTag tag = x->x()->type()->tag();
1327 bool is_safepoint = x->is_safepoint();
1328
1329 If::Condition cond = x->cond();
1330
1331 LIRItem xitem(x->x(), this);
1332 LIRItem yitem(x->y(), this);
1333 LIRItem* xin = &xitem;
1334 LIRItem* yin = &yitem;
1335
1336 if (tag == longTag) {
1337 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1338 // mirror for other conditions
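    // (e.g. "x > y" is rewritten as "y < x")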
1339 if (cond == If::gtr || cond == If::leq) {
1340 cond = Instruction::mirror(cond);
1341 xin = &yitem;
1342 yin = &xitem;
1343 }
1344 xin->set_destroys_register();
1345 }
1346 xin->load_item();
1347
1348 if (tag == longTag) {
1349 if (yin->is_constant()
1350 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
1351 yin->dont_load_item();
1352 } else {
1353 yin->load_item();
1354 }
1355 } else if (tag == intTag) {
1356 if (yin->is_constant()
1357 && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
1358 yin->dont_load_item();
1359 } else {
1360 yin->load_item();
1361 }
1362 } else {
1363 yin->load_item();
1364 }
1365
1366 set_no_result(x);
1367
1368 LIR_Opr left = xin->result();
1369 LIR_Opr right = yin->result();
1370
1371 // add safepoint before generating condition code so it can be recomputed
1372 if (x->is_safepoint()) {
1373 // increment backedge counter if needed
1374 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1375 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1376 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1377 }
1378
1379 __ cmp(lir_cond(cond), left, right);
1380 // Generate branch profiling. Profiling code doesn't kill flags.
1381 profile_branch(x, cond);
1382 move_to_phi(x->state());
1383 if (x->x()->type()->is_float_kind()) {
1384 __ branch(lir_cond(cond), x->tsux(), x->usux());
1385 } else {
1386 __ branch(lir_cond(cond), x->tsux());
1387 }
1388 assert(x->default_sux() == x->fsux(), "wrong destination above");
1389 __ jump(x->default_sux());
1390 }
1391
1392 LIR_Opr LIRGenerator::getThreadPointer() {
1393 return FrameMap::as_pointer_opr(rthread);
1394 }
1395
1396 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1397
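// Volatile stores use a releasing store (STLR); see the comment in
// volatile_field_load below about pairing with LDAR.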
1398 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1399 CodeEmitInfo* info) {
1400 __ volatile_store_mem_reg(value, address, info);
1401 }
1402
1403 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1404 CodeEmitInfo* info) {
1405 // 8179954: We need to make sure that the code generated for
1406 // volatile accesses forms a sequentially-consistent set of
1407 // operations when combined with STLR and LDAR. Without a leading
1408 // membar it's possible for a simple Dekker test to fail if loads
1409 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1410 // the stores in one method and C1 compiles the loads in another.
1411 if (!CompilerConfig::is_c1_only_no_jvmci()) {
1412 __ membar();
1413 }
1414 __ volatile_load_mem_reg(address, result, info);
1415 }