1 /*
2 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArrayKlass.hpp"
33 #include "ci/ciFlatArrayKlass.hpp"
34 #include "ci/ciInlineKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "ci/ciObjArray.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "ci/ciUtilities.hpp"
39 #include "compiler/compilerDefinitions.inline.hpp"
40 #include "compiler/compilerOracle.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/c1/barrierSetC1.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/methodCounters.hpp"
45 #include "runtime/arguments.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "runtime/vm_version.hpp"
49 #include "utilities/bitMap.inline.hpp"
50 #include "utilities/macros.hpp"
51 #include "utilities/powerOfTwo.hpp"
52
53 #ifdef ASSERT
54 #define __ gen()->lir(__FILE__, __LINE__)->
55 #else
56 #define __ gen()->lir()->
57 #endif
58
59 #ifndef PATCHED_ADDR
60 #define PATCHED_ADDR (max_jint)
61 #endif
62
63 void PhiResolverState::reset() {
64 _virtual_operands.clear();
65 _other_operands.clear();
66 _vreg_table.clear();
67 }
68
69
70 //--------------------------------------------------------------
71 // PhiResolver
72
73 // Resolves cycles:
74 //
75 // r1 := r2 becomes temp := r1
76 // r2 := r1 r1 := r2
77 // r2 := temp
78 // and orders moves:
79 //
80 // r2 := r3 becomes r1 := r2
81 // r1 := r2 r2 := r3
82
83 PhiResolver::PhiResolver(LIRGenerator* gen)
84 : _gen(gen)
85 , _state(gen->resolver_state())
86 , _loop(nullptr)
87 , _temp(LIR_OprFact::illegalOpr)
88 {
89 // reinitialize the shared state arrays
90 _state.reset();
91 }
92
93
94 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
95 assert(src->is_valid(), "");
96 assert(dest->is_valid(), "");
97 __ move(src, dest);
98 }
99
100
101 void PhiResolver::move_temp_to(LIR_Opr dest) {
102 assert(_temp->is_valid(), "");
103 emit_move(_temp, dest);
104 NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
105 }
106
107
108 void PhiResolver::move_to_temp(LIR_Opr src) {
109 assert(_temp->is_illegal(), "");
110 _temp = _gen->new_register(src->type());
111 emit_move(src, _temp);
112 }
113
114
115 // Traverse assignment graph in depth first order and generate moves in post order
116 // ie. two assignments: b := c, a := b start with node c:
117 // Call graph: move(null, c) -> move(c, b) -> move(b, a)
118 // Generates moves in this order: move b to a and move c to b
119 // ie. cycle a := b, b := a start with node a
120 // Call graph: move(null, a) -> move(a, b) -> move(b, a)
121 // Generates moves in this order: move b to temp, move a to b, move temp to a
122 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
123 if (!dest->visited()) {
124 dest->set_visited();
125 for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
126 move(dest, dest->destination_at(i));
127 }
128 } else if (!dest->start_node()) {
129 // cylce in graph detected
130 assert(_loop == nullptr, "only one loop valid!");
131 _loop = dest;
132 move_to_temp(src->operand());
133 return;
134 } // else dest is a start node
135
136 if (!dest->assigned()) {
137 if (_loop == dest) {
138 move_temp_to(dest->operand());
139 dest->set_assigned();
140 } else if (src != nullptr) {
141 emit_move(src->operand(), dest->operand());
142 dest->set_assigned();
143 }
144 }
145 }
146
147
148 PhiResolver::~PhiResolver() {
149 int i;
150 // resolve any cycles in moves from and to virtual registers
151 for (i = virtual_operands().length() - 1; i >= 0; i --) {
152 ResolveNode* node = virtual_operands().at(i);
153 if (!node->visited()) {
154 _loop = nullptr;
155 move(nullptr, node);
156 node->set_start_node();
157 assert(_temp->is_illegal(), "move_temp_to() call missing");
158 }
159 }
160
161 // generate move for move from non virtual register to abitrary destination
162 for (i = other_operands().length() - 1; i >= 0; i --) {
163 ResolveNode* node = other_operands().at(i);
164 for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
165 emit_move(node->operand(), node->destination_at(j)->operand());
166 }
167 }
168 }
169
170
171 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
172 ResolveNode* node;
173 if (opr->is_virtual()) {
174 int vreg_num = opr->vreg_number();
175 node = vreg_table().at_grow(vreg_num, nullptr);
176 assert(node == nullptr || node->operand() == opr, "");
177 if (node == nullptr) {
178 node = new ResolveNode(opr);
179 vreg_table().at_put(vreg_num, node);
180 }
181 // Make sure that all virtual operands show up in the list when
182 // they are used as the source of a move.
183 if (source && !virtual_operands().contains(node)) {
184 virtual_operands().append(node);
185 }
186 } else {
187 assert(source, "");
188 node = new ResolveNode(opr);
189 other_operands().append(node);
190 }
191 return node;
192 }
193
194
195 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
196 assert(dest->is_virtual(), "");
197 // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
198 assert(src->is_valid(), "");
199 assert(dest->is_valid(), "");
200 ResolveNode* source = source_node(src);
201 source->append(destination_node(dest));
202 }
203
204
205 //--------------------------------------------------------------
206 // LIRItem
207
208 void LIRItem::set_result(LIR_Opr opr) {
209 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
210 value()->set_operand(opr);
211
212 #ifdef ASSERT
213 if (opr->is_virtual()) {
214 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
215 }
216 #endif
217
218 _result = opr;
219 }
220
221 void LIRItem::load_item() {
222 assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
223
224 if (result()->is_illegal()) {
225 // update the items result
226 _result = value()->operand();
227 }
228 if (!result()->is_register()) {
229 LIR_Opr reg = _gen->new_register(value()->type());
230 __ move(result(), reg);
231 if (result()->is_constant()) {
232 _result = reg;
233 } else {
234 set_result(reg);
235 }
236 }
237 }
238
239
240 void LIRItem::load_for_store(BasicType type) {
241 if (_gen->can_store_as_constant(value(), type)) {
242 _result = value()->operand();
243 if (!_result->is_constant()) {
244 _result = LIR_OprFact::value_type(value()->type());
245 }
246 } else if (type == T_BYTE || type == T_BOOLEAN) {
247 load_byte_item();
248 } else {
249 load_item();
250 }
251 }
252
253 void LIRItem::load_item_force(LIR_Opr reg) {
254 LIR_Opr r = result();
255 if (r != reg) {
256 #if !defined(ARM) && !defined(E500V2)
257 if (r->type() != reg->type()) {
258 // moves between different types need an intervening spill slot
259 r = _gen->force_to_spill(r, reg->type());
260 }
261 #endif
262 __ move(r, reg);
263 _result = reg;
264 }
265 }
266
267 ciObject* LIRItem::get_jobject_constant() const {
268 ObjectType* oc = type()->as_ObjectType();
269 if (oc) {
270 return oc->constant_value();
271 }
272 return nullptr;
273 }
274
275
276 jint LIRItem::get_jint_constant() const {
277 assert(is_constant() && value() != nullptr, "");
278 assert(type()->as_IntConstant() != nullptr, "type check");
279 return type()->as_IntConstant()->value();
280 }
281
282
283 jint LIRItem::get_address_constant() const {
284 assert(is_constant() && value() != nullptr, "");
285 assert(type()->as_AddressConstant() != nullptr, "type check");
286 return type()->as_AddressConstant()->value();
287 }
288
289
290 jfloat LIRItem::get_jfloat_constant() const {
291 assert(is_constant() && value() != nullptr, "");
292 assert(type()->as_FloatConstant() != nullptr, "type check");
293 return type()->as_FloatConstant()->value();
294 }
295
296
297 jdouble LIRItem::get_jdouble_constant() const {
298 assert(is_constant() && value() != nullptr, "");
299 assert(type()->as_DoubleConstant() != nullptr, "type check");
300 return type()->as_DoubleConstant()->value();
301 }
302
303
304 jlong LIRItem::get_jlong_constant() const {
305 assert(is_constant() && value() != nullptr, "");
306 assert(type()->as_LongConstant() != nullptr, "type check");
307 return type()->as_LongConstant()->value();
308 }
309
310
311
312 //--------------------------------------------------------------
313
314
315 void LIRGenerator::block_do_prolog(BlockBegin* block) {
316 #ifndef PRODUCT
317 if (PrintIRWithLIR) {
318 block->print();
319 }
320 #endif
321
322 // set up the list of LIR instructions
323 assert(block->lir() == nullptr, "LIR list already computed for this block");
324 _lir = new LIR_List(compilation(), block);
325 block->set_lir(_lir);
326
327 __ branch_destination(block->label());
328
329 if (LIRTraceExecution &&
330 Compilation::current()->hir()->start()->block_id() != block->block_id() &&
331 !block->is_set(BlockBegin::exception_entry_flag)) {
332 assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
333 trace_block_entry(block);
334 }
335 }
336
337
338 void LIRGenerator::block_do_epilog(BlockBegin* block) {
339 #ifndef PRODUCT
340 if (PrintIRWithLIR) {
341 tty->cr();
342 }
343 #endif
344
345 // LIR_Opr for unpinned constants shouldn't be referenced by other
346 // blocks so clear them out after processing the block.
347 for (int i = 0; i < _unpinned_constants.length(); i++) {
348 _unpinned_constants.at(i)->clear_operand();
349 }
350 _unpinned_constants.trunc_to(0);
351
352 // clear our any registers for other local constants
353 _constants.trunc_to(0);
354 _reg_for_constants.trunc_to(0);
355 }
356
357
358 void LIRGenerator::block_do(BlockBegin* block) {
359 CHECK_BAILOUT();
360
361 block_do_prolog(block);
362 set_block(block);
363
364 for (Instruction* instr = block; instr != nullptr; instr = instr->next()) {
365 if (instr->is_pinned()) do_root(instr);
366 }
367
368 set_block(nullptr);
369 block_do_epilog(block);
370 }
371
372
373 //-------------------------LIRGenerator-----------------------------
374
375 // This is where the tree-walk starts; instr must be root;
376 void LIRGenerator::do_root(Value instr) {
377 CHECK_BAILOUT();
378
379 InstructionMark im(compilation(), instr);
380
381 assert(instr->is_pinned(), "use only with roots");
382 assert(instr->subst() == instr, "shouldn't have missed substitution");
383
384 instr->visit(this);
385
386 assert(!instr->has_uses() || instr->operand()->is_valid() ||
387 instr->as_Constant() != nullptr || bailed_out(), "invalid item set");
388 }
389
390
391 // This is called for each node in tree; the walk stops if a root is reached
392 void LIRGenerator::walk(Value instr) {
393 InstructionMark im(compilation(), instr);
394 //stop walk when encounter a root
395 if ((instr->is_pinned() && instr->as_Phi() == nullptr) || instr->operand()->is_valid()) {
396 assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != nullptr, "this root has not yet been visited");
397 } else {
398 assert(instr->subst() == instr, "shouldn't have missed substitution");
399 instr->visit(this);
400 // assert(instr->use_count() > 0 || instr->as_Phi() != nullptr, "leaf instruction must have a use");
401 }
402 }
403
404
405 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
406 assert(state != nullptr, "state must be defined");
407
408 #ifndef PRODUCT
409 state->verify();
410 #endif
411
412 ValueStack* s = state;
413 for_each_state(s) {
414 if (s->kind() == ValueStack::EmptyExceptionState ||
415 s->kind() == ValueStack::CallerEmptyExceptionState)
416 {
417 #ifdef ASSERT
418 int index;
419 Value value;
420 for_each_stack_value(s, index, value) {
421 fatal("state must be empty");
422 }
423 for_each_local_value(s, index, value) {
424 fatal("state must be empty");
425 }
426 #endif
427 assert(s->locks_size() == 0 || s->locks_size() == 1, "state must be empty");
428 continue;
429 }
430
431 int index;
432 Value value;
433 for_each_stack_value(s, index, value) {
434 assert(value->subst() == value, "missed substitution");
435 if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
436 walk(value);
437 assert(value->operand()->is_valid(), "must be evaluated now");
438 }
439 }
440
441 int bci = s->bci();
442 IRScope* scope = s->scope();
443 ciMethod* method = scope->method();
444
445 MethodLivenessResult liveness = method->liveness_at_bci(bci);
446 if (bci == SynchronizationEntryBCI) {
447 if (x->as_ExceptionObject() || x->as_Throw()) {
448 // all locals are dead on exit from the synthetic unlocker
449 liveness.clear();
450 } else {
451 assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
452 }
453 }
454 if (!liveness.is_valid()) {
455 // Degenerate or breakpointed method.
456 bailout("Degenerate or breakpointed method");
457 } else {
458 assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
459 for_each_local_value(s, index, value) {
460 assert(value->subst() == value, "missed substitution");
461 if (liveness.at(index) && !value->type()->is_illegal()) {
462 if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
463 walk(value);
464 assert(value->operand()->is_valid(), "must be evaluated now");
465 }
466 } else {
467 // null out this local so that linear scan can assume that all non-null values are live.
468 s->invalidate_local(index);
469 }
470 }
471 }
472 }
473
474 return new CodeEmitInfo(state, ignore_xhandler ? nullptr : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
475 }
476
477
478 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
479 return state_for(x, x->exception_state());
480 }
481
482
483 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
484 /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
485 * is active and the class hasn't yet been resolved we need to emit a patch that resolves
486 * the class. */
487 if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
488 assert(info != nullptr, "info must be set if class is not loaded");
489 __ klass2reg_patch(nullptr, r, info);
490 } else {
491 // no patching needed
492 __ metadata2reg(obj->constant_encoding(), r);
493 }
494 }
495
496
497 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
498 CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
499 CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
500 if (index->is_constant()) {
501 cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
502 index->as_jint(), null_check_info);
503 __ branch(lir_cond_belowEqual, stub); // forward branch
504 } else {
505 cmp_reg_mem(lir_cond_aboveEqual, index, array,
506 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
507 __ branch(lir_cond_aboveEqual, stub); // forward branch
508 }
509 }
510
511 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
512 LIR_Opr result_op = result;
513 LIR_Opr left_op = left;
514 LIR_Opr right_op = right;
515
516 if (two_operand_lir_form && left_op != result_op) {
517 assert(right_op != result_op, "malformed");
518 __ move(left_op, result_op);
519 left_op = result_op;
520 }
521
522 switch(code) {
523 case Bytecodes::_dadd:
524 case Bytecodes::_fadd:
525 case Bytecodes::_ladd:
526 case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
527 case Bytecodes::_fmul:
528 case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;
529
530 case Bytecodes::_dmul: __ mul(left_op, right_op, result_op, tmp_op); break;
531
532 case Bytecodes::_imul:
533 {
534 bool did_strength_reduce = false;
535
536 if (right->is_constant()) {
537 jint c = right->as_jint();
538 if (c > 0 && is_power_of_2(c)) {
539 // do not need tmp here
540 __ shift_left(left_op, exact_log2(c), result_op);
541 did_strength_reduce = true;
542 } else {
543 did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
544 }
545 }
546 // we couldn't strength reduce so just emit the multiply
547 if (!did_strength_reduce) {
548 __ mul(left_op, right_op, result_op);
549 }
550 }
551 break;
552
553 case Bytecodes::_dsub:
554 case Bytecodes::_fsub:
555 case Bytecodes::_lsub:
556 case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
557
558 case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
559 // ldiv and lrem are implemented with a direct runtime call
560
561 case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;
562
563 case Bytecodes::_drem:
564 case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
565
566 default: ShouldNotReachHere();
567 }
568 }
569
570
571 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
572 arithmetic_op(code, result, left, right, tmp);
573 }
574
575
576 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
577 arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
578 }
579
580
581 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
582 arithmetic_op(code, result, left, right, tmp);
583 }
584
585
586 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
587
588 if (two_operand_lir_form && value != result_op
589 // Only 32bit right shifts require two operand form on S390.
590 S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
591 assert(count != result_op, "malformed");
592 __ move(value, result_op);
593 value = result_op;
594 }
595
596 assert(count->is_constant() || count->is_register(), "must be");
597 switch(code) {
598 case Bytecodes::_ishl:
599 case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
600 case Bytecodes::_ishr:
601 case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
602 case Bytecodes::_iushr:
603 case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
604 default: ShouldNotReachHere();
605 }
606 }
607
608
609 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
610 if (two_operand_lir_form && left_op != result_op) {
611 assert(right_op != result_op, "malformed");
612 __ move(left_op, result_op);
613 left_op = result_op;
614 }
615
616 switch(code) {
617 case Bytecodes::_iand:
618 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
619
620 case Bytecodes::_ior:
621 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
622
623 case Bytecodes::_ixor:
624 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
625
626 default: ShouldNotReachHere();
627 }
628 }
629
630
631 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
632 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
633 // for slow path, use debug info for state after successful locking
634 CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
635 __ load_stack_address_monitor(monitor_no, lock);
636 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
637 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
638 }
639
640
641 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
642 // setup registers
643 LIR_Opr hdr = lock;
644 lock = new_hdr;
645 CodeStub* slow_path = new MonitorExitStub(lock, monitor_no);
646 __ load_stack_address_monitor(monitor_no, lock);
647 __ unlock_object(hdr, object, lock, scratch, slow_path);
648 }
649
650 #ifndef PRODUCT
651 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
652 if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
653 tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
654 } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
655 tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
656 }
657 }
658 #endif
659
660 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
661 if (allow_inline) {
662 assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
663 __ metadata2reg(klass->constant_encoding(), klass_reg);
664 } else {
665 klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
666 }
667 // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
668 if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
669 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
670
671 StubId stub_id = klass->is_initialized() ? StubId::c1_fast_new_instance_id : StubId::c1_fast_new_instance_init_check_id;
672
673 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
674
675 assert(klass->is_loaded(), "must be loaded");
676 // allocate space for instance
677 assert(klass->size_helper() > 0, "illegal instance size");
678 const int instance_size = align_object_size(klass->size_helper());
679 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
680 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
681 } else {
682 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, StubId::c1_new_instance_id);
683 __ jump(slow_path);
684 __ branch_destination(slow_path->continuation());
685 }
686 }
687
688
689 static bool is_constant_zero(Instruction* inst) {
690 IntConstant* c = inst->type()->as_IntConstant();
691 if (c) {
692 return (c->value() == 0);
693 }
694 return false;
695 }
696
697
698 static bool positive_constant(Instruction* inst) {
699 IntConstant* c = inst->type()->as_IntConstant();
700 if (c) {
701 return (c->value() >= 0);
702 }
703 return false;
704 }
705
706
707 static ciArrayKlass* as_array_klass(ciType* type) {
708 if (type != nullptr && type->is_array_klass() && type->is_loaded()) {
709 return (ciArrayKlass*)type;
710 } else {
711 return nullptr;
712 }
713 }
714
715 static ciType* phi_declared_type(Phi* phi) {
716 ciType* t = phi->operand_at(0)->declared_type();
717 if (t == nullptr) {
718 return nullptr;
719 }
720 for(int i = 1; i < phi->operand_count(); i++) {
721 if (t != phi->operand_at(i)->declared_type()) {
722 return nullptr;
723 }
724 }
725 return t;
726 }
727
728 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
729 Instruction* src = x->argument_at(0);
730 Instruction* src_pos = x->argument_at(1);
731 Instruction* dst = x->argument_at(2);
732 Instruction* dst_pos = x->argument_at(3);
733 Instruction* length = x->argument_at(4);
734
735 // first try to identify the likely type of the arrays involved
736 ciArrayKlass* expected_type = nullptr;
737 bool is_exact = false, src_objarray = false, dst_objarray = false;
738 {
739 ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
740 ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
741 Phi* phi;
742 if (src_declared_type == nullptr && (phi = src->as_Phi()) != nullptr) {
743 src_declared_type = as_array_klass(phi_declared_type(phi));
744 }
745 ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
746 ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
747 if (dst_declared_type == nullptr && (phi = dst->as_Phi()) != nullptr) {
748 dst_declared_type = as_array_klass(phi_declared_type(phi));
749 }
750
751 if (src_exact_type != nullptr && src_exact_type == dst_exact_type) {
752 // the types exactly match so the type is fully known
753 is_exact = true;
754 expected_type = src_exact_type;
755 } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
756 ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
757 ciArrayKlass* src_type = nullptr;
758 if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
759 src_type = (ciArrayKlass*) src_exact_type;
760 } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
761 src_type = (ciArrayKlass*) src_declared_type;
762 }
763 if (src_type != nullptr) {
764 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
765 is_exact = true;
766 expected_type = dst_type;
767 }
768 }
769 }
770 // at least pass along a good guess
771 if (expected_type == nullptr) expected_type = dst_exact_type;
772 if (expected_type == nullptr) expected_type = src_declared_type;
773 if (expected_type == nullptr) expected_type = dst_declared_type;
774
775 if (expected_type != nullptr && expected_type->is_obj_array_klass()) {
776 // For a direct pointer comparison, we need the refined array klass pointer
777 expected_type = ciObjArrayKlass::make(expected_type->as_array_klass()->element_klass());
778 }
779
780 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
781 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
782 }
783
784 // if a probable array type has been identified, figure out if any
785 // of the required checks for a fast case can be elided.
786 int flags = LIR_OpArrayCopy::all_flags;
787
788 if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
789 flags &= ~LIR_OpArrayCopy::always_slow_path;
790 }
791 if (!src->maybe_flat_array()) {
792 flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
793 }
794 if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
795 flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
796 }
797
798 if (!src_objarray)
799 flags &= ~LIR_OpArrayCopy::src_objarray;
800 if (!dst_objarray)
801 flags &= ~LIR_OpArrayCopy::dst_objarray;
802
803 if (!x->arg_needs_null_check(0))
804 flags &= ~LIR_OpArrayCopy::src_null_check;
805 if (!x->arg_needs_null_check(2))
806 flags &= ~LIR_OpArrayCopy::dst_null_check;
807
808
809 if (expected_type != nullptr) {
810 Value length_limit = nullptr;
811
812 IfOp* ifop = length->as_IfOp();
813 if (ifop != nullptr) {
814 // look for expressions like min(v, a.length) which ends up as
815 // x > y ? y : x or x >= y ? y : x
816 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
817 ifop->x() == ifop->fval() &&
818 ifop->y() == ifop->tval()) {
819 length_limit = ifop->y();
820 }
821 }
822
823 // try to skip null checks and range checks
824 NewArray* src_array = src->as_NewArray();
825 if (src_array != nullptr) {
826 flags &= ~LIR_OpArrayCopy::src_null_check;
827 if (length_limit != nullptr &&
828 src_array->length() == length_limit &&
829 is_constant_zero(src_pos)) {
830 flags &= ~LIR_OpArrayCopy::src_range_check;
831 }
832 }
833
834 NewArray* dst_array = dst->as_NewArray();
835 if (dst_array != nullptr) {
836 flags &= ~LIR_OpArrayCopy::dst_null_check;
837 if (length_limit != nullptr &&
838 dst_array->length() == length_limit &&
839 is_constant_zero(dst_pos)) {
840 flags &= ~LIR_OpArrayCopy::dst_range_check;
841 }
842 }
843
844 // check from incoming constant values
845 if (positive_constant(src_pos))
846 flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
847 if (positive_constant(dst_pos))
848 flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
849 if (positive_constant(length))
850 flags &= ~LIR_OpArrayCopy::length_positive_check;
851
852 // see if the range check can be elided, which might also imply
853 // that src or dst is non-null.
854 ArrayLength* al = length->as_ArrayLength();
855 if (al != nullptr) {
856 if (al->array() == src) {
857 // it's the length of the source array
858 flags &= ~LIR_OpArrayCopy::length_positive_check;
859 flags &= ~LIR_OpArrayCopy::src_null_check;
860 if (is_constant_zero(src_pos))
861 flags &= ~LIR_OpArrayCopy::src_range_check;
862 }
863 if (al->array() == dst) {
864 // it's the length of the destination array
865 flags &= ~LIR_OpArrayCopy::length_positive_check;
866 flags &= ~LIR_OpArrayCopy::dst_null_check;
867 if (is_constant_zero(dst_pos))
868 flags &= ~LIR_OpArrayCopy::dst_range_check;
869 }
870 }
871 if (is_exact) {
872 flags &= ~LIR_OpArrayCopy::type_check;
873 }
874 }
875
876 IntConstant* src_int = src_pos->type()->as_IntConstant();
877 IntConstant* dst_int = dst_pos->type()->as_IntConstant();
878 if (src_int && dst_int) {
879 int s_offs = src_int->value();
880 int d_offs = dst_int->value();
881 if (src_int->value() >= dst_int->value()) {
882 flags &= ~LIR_OpArrayCopy::overlapping;
883 }
884 if (expected_type != nullptr) {
885 BasicType t = expected_type->element_type()->basic_type();
886 int element_size = type2aelembytes(t);
887 if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
888 ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) {
889 flags &= ~LIR_OpArrayCopy::unaligned;
890 }
891 }
892 } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
893 // src and dest positions are the same, or dst is zero so assume
894 // nonoverlapping copy.
895 flags &= ~LIR_OpArrayCopy::overlapping;
896 }
897
898 if (src == dst) {
899 // moving within a single array so no type checks are needed
900 if (flags & LIR_OpArrayCopy::type_check) {
901 flags &= ~LIR_OpArrayCopy::type_check;
902 }
903 }
904 *flagsp = flags;
905 *expected_typep = (ciArrayKlass*)expected_type;
906 }
907
908
909 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
910 assert(type2size[t] == type2size[value->type()],
911 "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
912 if (!value->is_register()) {
913 // force into a register
914 LIR_Opr r = new_register(value->type());
915 __ move(value, r);
916 value = r;
917 }
918
919 // create a spill location
920 LIR_Opr tmp = new_register(t);
921 set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
922
923 // move from register to spill
924 __ move(value, tmp);
925 return tmp;
926 }
927
928 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
929 if (if_instr->should_profile()) {
930 ciMethod* method = if_instr->profiled_method();
931 assert(method != nullptr, "method should be set if branch is profiled");
932 ciMethodData* md = method->method_data_or_null();
933 assert(md != nullptr, "Sanity");
934 ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
935 assert(data != nullptr, "must have profiling data");
936 assert(data->is_BranchData(), "need BranchData for two-way branches");
937 int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
938 int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
939 if (if_instr->is_swapped()) {
940 int t = taken_count_offset;
941 taken_count_offset = not_taken_count_offset;
942 not_taken_count_offset = t;
943 }
944
945 LIR_Opr md_reg = new_register(T_METADATA);
946 __ metadata2reg(md->constant_encoding(), md_reg);
947
948 LIR_Opr data_offset_reg = new_pointer_register();
949 __ cmove(lir_cond(cond),
950 LIR_OprFact::intptrConst(taken_count_offset),
951 LIR_OprFact::intptrConst(not_taken_count_offset),
952 data_offset_reg, as_BasicType(if_instr->x()->type()));
953
954 // MDO cells are intptr_t, so the data_reg width is arch-dependent.
955 LIR_Opr data_reg = new_pointer_register();
956 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
957 __ move(data_addr, data_reg);
958 // Use leal instead of add to avoid destroying condition codes on x86
959 LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
960 __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
961 __ move(data_reg, data_addr);
962 }
963 }
964
965 // Phi technique:
966 // This is about passing live values from one basic block to the other.
967 // In code generated with Java it is rather rare that more than one
968 // value is on the stack from one basic block to the other.
969 // We optimize our technique for efficient passing of one value
970 // (of type long, int, double..) but it can be extended.
971 // When entering or leaving a basic block, all registers and all spill
972 // slots are release and empty. We use the released registers
973 // and spill slots to pass the live values from one block
974 // to the other. The topmost value, i.e., the value on TOS of expression
975 // stack is passed in registers. All other values are stored in spilling
976 // area. Every Phi has an index which designates its spill slot
977 // At exit of a basic block, we fill the register(s) and spill slots.
978 // At entry of a basic block, the block_prolog sets up the content of phi nodes
979 // and locks necessary registers and spilling slots.
980
981
982 // move current value to referenced phi function
983 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
984 Phi* phi = sux_val->as_Phi();
985 // cur_val can be null without phi being null in conjunction with inlining
986 if (phi != nullptr && cur_val != nullptr && cur_val != phi && !phi->is_illegal()) {
987 if (phi->is_local()) {
988 for (int i = 0; i < phi->operand_count(); i++) {
989 Value op = phi->operand_at(i);
990 if (op != nullptr && op->type()->is_illegal()) {
991 bailout("illegal phi operand");
992 }
993 }
994 }
995 Phi* cur_phi = cur_val->as_Phi();
996 if (cur_phi != nullptr && cur_phi->is_illegal()) {
997 // Phi and local would need to get invalidated
998 // (which is unexpected for Linear Scan).
999 // But this case is very rare so we simply bail out.
1000 bailout("propagation of illegal phi");
1001 return;
1002 }
1003 LIR_Opr operand = cur_val->operand();
1004 if (operand->is_illegal()) {
1005 assert(cur_val->as_Constant() != nullptr || cur_val->as_Local() != nullptr,
1006 "these can be produced lazily");
1007 operand = operand_for_instruction(cur_val);
1008 }
1009 resolver->move(operand, operand_for_instruction(phi));
1010 }
1011 }
1012
1013
1014 // Moves all stack values into their PHI position
1015 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1016 BlockBegin* bb = block();
1017 if (bb->number_of_sux() == 1) {
1018 BlockBegin* sux = bb->sux_at(0);
1019 assert(sux->number_of_preds() > 0, "invalid CFG");
1020
1021 // a block with only one predecessor never has phi functions
1022 if (sux->number_of_preds() > 1) {
1023 PhiResolver resolver(this);
1024
1025 ValueStack* sux_state = sux->state();
1026 Value sux_value;
1027 int index;
1028
1029 assert(cur_state->scope() == sux_state->scope(), "not matching");
1030 assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1031 assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1032
1033 for_each_stack_value(sux_state, index, sux_value) {
1034 move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1035 }
1036
1037 for_each_local_value(sux_state, index, sux_value) {
1038 move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1039 }
1040
1041 assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1042 }
1043 }
1044 }
1045
1046
1047 LIR_Opr LIRGenerator::new_register(BasicType type) {
1048 int vreg_num = _virtual_register_number;
1049 // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
1050 // a few extra registers before we really run out which helps to avoid to trip over assertions.
1051 if (vreg_num + 20 >= LIR_Opr::vreg_max) {
1052 bailout("out of virtual registers in LIR generator");
1053 if (vreg_num + 2 >= LIR_Opr::vreg_max) {
1054 // Wrap it around and continue until bailout really happens to avoid hitting assertions.
1055 _virtual_register_number = LIR_Opr::vreg_base;
1056 vreg_num = LIR_Opr::vreg_base;
1057 }
1058 }
1059 _virtual_register_number += 1;
1060 LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
1061 assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
1062 return vreg;
1063 }
1064
1065
1066 // Try to lock using register in hint
1067 LIR_Opr LIRGenerator::rlock(Value instr) {
1068 return new_register(instr->type());
1069 }
1070
1071
1072 // does an rlock and sets result
1073 LIR_Opr LIRGenerator::rlock_result(Value x) {
1074 LIR_Opr reg = rlock(x);
1075 set_result(x, reg);
1076 return reg;
1077 }
1078
1079
1080 // does an rlock and sets result
1081 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1082 LIR_Opr reg;
1083 switch (type) {
1084 case T_BYTE:
1085 case T_BOOLEAN:
1086 reg = rlock_byte(type);
1087 break;
1088 default:
1089 reg = rlock(x);
1090 break;
1091 }
1092
1093 set_result(x, reg);
1094 return reg;
1095 }
1096
1097
1098 //---------------------------------------------------------------------
1099 ciObject* LIRGenerator::get_jobject_constant(Value value) {
1100 ObjectType* oc = value->type()->as_ObjectType();
1101 if (oc) {
1102 return oc->constant_value();
1103 }
1104 return nullptr;
1105 }
1106
1107
1108 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1109 assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1110 assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1111
1112 // no moves are created for phi functions at the begin of exception
1113 // handlers, so assign operands manually here
1114 for_each_phi_fun(block(), phi,
1115 if (!phi->is_illegal()) { operand_for_instruction(phi); });
1116
1117 LIR_Opr thread_reg = getThreadPointer();
1118 __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1119 exceptionOopOpr());
1120 __ move_wide(LIR_OprFact::oopConst(nullptr),
1121 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1122 __ move_wide(LIR_OprFact::oopConst(nullptr),
1123 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1124
1125 LIR_Opr result = new_register(T_OBJECT);
1126 __ move(exceptionOopOpr(), result);
1127 set_result(x, result);
1128 }
1129
1130
1131 //----------------------------------------------------------------------
1132 //----------------------------------------------------------------------
1133 //----------------------------------------------------------------------
1134 //----------------------------------------------------------------------
1135 // visitor functions
1136 //----------------------------------------------------------------------
1137 //----------------------------------------------------------------------
1138 //----------------------------------------------------------------------
1139 //----------------------------------------------------------------------
1140
1141 void LIRGenerator::do_Phi(Phi* x) {
1142 // phi functions are never visited directly
1143 ShouldNotReachHere();
1144 }
1145
1146
1147 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1148 void LIRGenerator::do_Constant(Constant* x) {
1149 if (x->state_before() != nullptr) {
1150 // Any constant with a ValueStack requires patching so emit the patch here
1151 LIR_Opr reg = rlock_result(x);
1152 CodeEmitInfo* info = state_for(x, x->state_before());
1153 __ oop2reg_patch(nullptr, reg, info);
1154 } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1155 if (!x->is_pinned()) {
1156 // unpinned constants are handled specially so that they can be
1157 // put into registers when they are used multiple times within a
1158 // block. After the block completes their operand will be
1159 // cleared so that other blocks can't refer to that register.
1160 set_result(x, load_constant(x));
1161 } else {
1162 LIR_Opr res = x->operand();
1163 if (!res->is_valid()) {
1164 res = LIR_OprFact::value_type(x->type());
1165 }
1166 if (res->is_constant()) {
1167 LIR_Opr reg = rlock_result(x);
1168 __ move(res, reg);
1169 } else {
1170 set_result(x, res);
1171 }
1172 }
1173 } else {
1174 set_result(x, LIR_OprFact::value_type(x->type()));
1175 }
1176 }
1177
1178
1179 void LIRGenerator::do_Local(Local* x) {
1180 // operand_for_instruction has the side effect of setting the result
1181 // so there's no need to do it here.
1182 operand_for_instruction(x);
1183 }
1184
1185
1186 void LIRGenerator::do_Return(Return* x) {
1187 if (compilation()->env()->dtrace_method_probes()) {
1188 BasicTypeList signature;
1189 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
1190 signature.append(T_METADATA); // Method*
1191 LIR_OprList* args = new LIR_OprList();
1192 args->append(getThreadPointer());
1193 LIR_Opr meth = new_register(T_METADATA);
1194 __ metadata2reg(method()->constant_encoding(), meth);
1195 args->append(meth);
1196 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, nullptr);
1197 }
1198
1199 if (x->type()->is_void()) {
1200 __ return_op(LIR_OprFact::illegalOpr);
1201 } else {
1202 LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1203 LIRItem result(x->result(), this);
1204
1205 result.load_item_force(reg);
1206 __ return_op(result.result());
1207 }
1208 set_no_result(x);
1209 }
1210
1211 // Example: ref.get()
1212 // Combination of LoadField and g1 pre-write barrier
1213 void LIRGenerator::do_Reference_get0(Intrinsic* x) {
1214
1215 const int referent_offset = java_lang_ref_Reference::referent_offset();
1216
1217 assert(x->number_of_arguments() == 1, "wrong type");
1218
1219 LIRItem reference(x->argument_at(0), this);
1220 reference.load_item();
1221
1222 // need to perform the null check on the reference object
1223 CodeEmitInfo* info = nullptr;
1224 if (x->needs_null_check()) {
1225 info = state_for(x);
1226 }
1227
1228 LIR_Opr result = rlock_result(x, T_OBJECT);
1229 access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
1230 reference, LIR_OprFact::intConst(referent_offset), result,
1231 nullptr, info);
1232 }
1233
1234 // Example: clazz.isInstance(object)
1235 void LIRGenerator::do_isInstance(Intrinsic* x) {
1236 assert(x->number_of_arguments() == 2, "wrong type");
1237
1238 LIRItem clazz(x->argument_at(0), this);
1239 LIRItem object(x->argument_at(1), this);
1240 clazz.load_item();
1241 object.load_item();
1242 LIR_Opr result = rlock_result(x);
1243
1244 // need to perform null check on clazz
1245 if (x->needs_null_check()) {
1246 CodeEmitInfo* info = state_for(x);
1247 __ null_check(clazz.result(), info);
1248 }
1249
1250 address pd_instanceof_fn = isInstance_entry();
1251 LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1252 pd_instanceof_fn,
1253 x->type(),
1254 nullptr); // null CodeEmitInfo results in a leaf call
1255 __ move(call_result, result);
1256 }
1257
1258 void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
1259 __ load_klass(obj, klass, null_check_info);
1260 }
1261
1262 // Example: object.getClass ()
1263 void LIRGenerator::do_getClass(Intrinsic* x) {
1264 assert(x->number_of_arguments() == 1, "wrong type");
1265
1266 LIRItem rcvr(x->argument_at(0), this);
1267 rcvr.load_item();
1268 LIR_Opr temp = new_register(T_ADDRESS);
1269 LIR_Opr result = rlock_result(x);
1270
1271 // need to perform the null check on the rcvr
1272 CodeEmitInfo* info = nullptr;
1273 if (x->needs_null_check()) {
1274 info = state_for(x);
1275 }
1276
1277 LIR_Opr klass = new_register(T_METADATA);
1278 load_klass(rcvr.result(), klass, info);
1279 __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
1280 // mirror = ((OopHandle)mirror)->resolve();
1281 access_load(IN_NATIVE, T_OBJECT,
1282 LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
1283 }
1284
1285 void LIRGenerator::do_getObjectSize(Intrinsic* x) {
1286 assert(x->number_of_arguments() == 3, "wrong type");
1287 LIR_Opr result_reg = rlock_result(x);
1288
1289 LIRItem value(x->argument_at(2), this);
1290 value.load_item();
1291
1292 LIR_Opr klass = new_register(T_METADATA);
1293 load_klass(value.result(), klass, nullptr);
1294 LIR_Opr layout = new_register(T_INT);
1295 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1296
1297 LabelObj* L_done = new LabelObj();
1298 LabelObj* L_array = new LabelObj();
1299
1300 __ cmp(lir_cond_lessEqual, layout, 0);
1301 __ branch(lir_cond_lessEqual, L_array->label());
1302
1303 // Instance case: the layout helper gives us instance size almost directly,
1304 // but we need to mask out the _lh_instance_slow_path_bit.
1305
1306 assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
1307
1308 LIR_Opr mask = load_immediate(~(jint) right_n_bits(LogBytesPerLong), T_INT);
1309 __ logical_and(layout, mask, layout);
1310 __ convert(Bytecodes::_i2l, layout, result_reg);
1311
1312 __ branch(lir_cond_always, L_done->label());
1313
1314 // Array case: size is round(header + element_size*arraylength).
1315 // Since arraylength is different for every array instance, we have to
1316 // compute the whole thing at runtime.
1317
1318 __ branch_destination(L_array->label());
1319
1320 int round_mask = MinObjAlignmentInBytes - 1;
1321
1322 // Figure out header sizes first.
1323 LIR_Opr hss = load_immediate(Klass::_lh_header_size_shift, T_INT);
1324 LIR_Opr hsm = load_immediate(Klass::_lh_header_size_mask, T_INT);
1325
1326 LIR_Opr header_size = new_register(T_INT);
1327 __ move(layout, header_size);
1328 LIR_Opr tmp = new_register(T_INT);
1329 __ unsigned_shift_right(header_size, hss, header_size, tmp);
1330 __ logical_and(header_size, hsm, header_size);
1331 __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);
1332
1333 // Figure out the array length in bytes
1334 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
1335 LIR_Opr l2esm = load_immediate(Klass::_lh_log2_element_size_mask, T_INT);
1336 __ logical_and(layout, l2esm, layout);
1337
1338 LIR_Opr length_int = new_register(T_INT);
1339 __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);
1340
1341 #ifdef _LP64
1342 LIR_Opr length = new_register(T_LONG);
1343 __ convert(Bytecodes::_i2l, length_int, length);
1344 #endif
1345
1346 // Shift-left awkwardness. Normally it is just:
1347 // __ shift_left(length, layout, length);
1348 // But C1 cannot perform shift_left with non-constant count, so we end up
1349 // doing the per-bit loop dance here. x86_32 also does not know how to shift
1350 // longs, so we have to act on ints.
1351 LabelObj* L_shift_loop = new LabelObj();
1352 LabelObj* L_shift_exit = new LabelObj();
1353
1354 __ branch_destination(L_shift_loop->label());
1355 __ cmp(lir_cond_equal, layout, 0);
1356 __ branch(lir_cond_equal, L_shift_exit->label());
1357
1358 #ifdef _LP64
1359 __ shift_left(length, 1, length);
1360 #else
1361 __ shift_left(length_int, 1, length_int);
1362 #endif
1363
1364 __ sub(layout, LIR_OprFact::intConst(1), layout);
1365
1366 __ branch(lir_cond_always, L_shift_loop->label());
1367 __ branch_destination(L_shift_exit->label());
1368
1369 // Mix all up, round, and push to the result.
1370 #ifdef _LP64
1371 LIR_Opr header_size_long = new_register(T_LONG);
1372 __ convert(Bytecodes::_i2l, header_size, header_size_long);
1373 __ add(length, header_size_long, length);
1374 if (round_mask != 0) {
1375 LIR_Opr round_mask_opr = load_immediate(~(jlong)round_mask, T_LONG);
1376 __ logical_and(length, round_mask_opr, length);
1377 }
1378 __ move(length, result_reg);
1379 #else
1380 __ add(length_int, header_size, length_int);
1381 if (round_mask != 0) {
1382 LIR_Opr round_mask_opr = load_immediate(~round_mask, T_INT);
1383 __ logical_and(length_int, round_mask_opr, length_int);
1384 }
1385 __ convert(Bytecodes::_i2l, length_int, result_reg);
1386 #endif
1387
1388 __ branch_destination(L_done->label());
1389 }
1390
1391 void LIRGenerator::do_scopedValueCache(Intrinsic* x) {
1392 do_JavaThreadField(x, JavaThread::scopedValueCache_offset());
1393 }
1394
1395 // Example: Thread.currentCarrierThread()
1396 void LIRGenerator::do_currentCarrierThread(Intrinsic* x) {
1397 do_JavaThreadField(x, JavaThread::threadObj_offset());
1398 }
1399
1400 void LIRGenerator::do_vthread(Intrinsic* x) {
1401 do_JavaThreadField(x, JavaThread::vthread_offset());
1402 }
1403
1404 void LIRGenerator::do_JavaThreadField(Intrinsic* x, ByteSize offset) {
1405 assert(x->number_of_arguments() == 0, "wrong type");
1406 LIR_Opr temp = new_register(T_ADDRESS);
1407 LIR_Opr reg = rlock_result(x);
1408 __ move(new LIR_Address(getThreadPointer(), in_bytes(offset), T_ADDRESS), temp);
1409 access_load(IN_NATIVE, T_OBJECT,
1410 LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
1411 }
1412
1413 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1414 assert(x->number_of_arguments() == 1, "wrong type");
1415 LIRItem receiver(x->argument_at(0), this);
1416
1417 receiver.load_item();
1418 BasicTypeList signature;
1419 signature.append(T_OBJECT); // receiver
1420 LIR_OprList* args = new LIR_OprList();
1421 args->append(receiver.result());
1422 CodeEmitInfo* info = state_for(x, x->state());
1423 call_runtime(&signature, args,
1424 CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_register_finalizer_id)),
1425 voidType, info);
1426
1427 set_no_result(x);
1428 }
1429
1430
1431 //------------------------local access--------------------------------------
1432
1433 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1434 if (x->operand()->is_illegal()) {
1435 Constant* c = x->as_Constant();
1436 if (c != nullptr) {
1437 x->set_operand(LIR_OprFact::value_type(c->type()));
1438 } else {
1439 assert(x->as_Phi() || x->as_Local() != nullptr, "only for Phi and Local");
1440 // allocate a virtual register for this local or phi
1441 x->set_operand(rlock(x));
1442 #ifdef ASSERT
1443 _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, nullptr);
1444 #endif
1445 }
1446 }
1447 return x->operand();
1448 }
1449
1450 #ifdef ASSERT
1451 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1452 if (reg_num < _instruction_for_operand.length()) {
1453 return _instruction_for_operand.at(reg_num);
1454 }
1455 return nullptr;
1456 }
1457 #endif
1458
1459 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1460 if (_vreg_flags.size_in_bits() == 0) {
1461 BitMap2D temp(100, num_vreg_flags);
1462 _vreg_flags = temp;
1463 }
1464 _vreg_flags.at_put_grow(vreg_num, f, true);
1465 }
1466
1467 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1468 if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1469 return false;
1470 }
1471 return _vreg_flags.at(vreg_num, f);
1472 }
1473
1474
1475 // Block local constant handling. This code is useful for keeping
1476 // unpinned constants and constants which aren't exposed in the IR in
1477 // registers. Unpinned Constant instructions have their operands
1478 // cleared when the block is finished so that other blocks can't end
1479 // up referring to their registers.
1480
1481 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1482 assert(!x->is_pinned(), "only for unpinned constants");
1483 _unpinned_constants.append(x);
1484 return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1485 }
1486
1487
1488 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1489 BasicType t = c->type();
1490 for (int i = 0; i < _constants.length() && !in_conditional_code(); i++) {
1491 LIR_Const* other = _constants.at(i);
1492 if (t == other->type()) {
1493 switch (t) {
1494 case T_INT:
1495 case T_FLOAT:
1496 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1497 break;
1498 case T_LONG:
1499 case T_DOUBLE:
1500 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1501 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1502 break;
1503 case T_OBJECT:
1504 if (c->as_jobject() != other->as_jobject()) continue;
1505 break;
1506 default:
1507 break;
1508 }
1509 return _reg_for_constants.at(i);
1510 }
1511 }
1512
1513 LIR_Opr result = new_register(t);
1514 __ move((LIR_Opr)c, result);
1515 if (!in_conditional_code()) {
1516 _constants.append(c);
1517 _reg_for_constants.append(result);
1518 }
1519 return result;
1520 }
1521
1522 void LIRGenerator::set_in_conditional_code(bool v) {
1523 assert(v != _in_conditional_code, "must change state");
1524 _in_conditional_code = v;
1525 }
1526
1527
1528 //------------------------field access--------------------------------------
1529
1530 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1531 assert(x->number_of_arguments() == 4, "wrong type");
1532 LIRItem obj (x->argument_at(0), this); // object
1533 LIRItem offset(x->argument_at(1), this); // offset of field
1534 LIRItem cmp (x->argument_at(2), this); // value to compare with field
1535 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
1536 assert(obj.type()->tag() == objectTag, "invalid type");
1537 assert(cmp.type()->tag() == type->tag(), "invalid type");
1538 assert(val.type()->tag() == type->tag(), "invalid type");
1539
1540 LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1541 obj, offset, cmp, val);
1542 set_result(x, result);
1543 }
1544
1545 // Returns a int/long value with the null marker bit set
1546 static LIR_Opr null_marker_mask(BasicType bt, ciField* field) {
1547 assert(field->null_marker_offset() != -1, "field does not have null marker");
1548 int nm_offset = field->null_marker_offset() - field->offset_in_bytes();
1549 jlong null_marker = 1ULL << (nm_offset << LogBitsPerByte);
1550 return (bt == T_LONG) ? LIR_OprFact::longConst(null_marker) : LIR_OprFact::intConst(null_marker);
1551 }
1552
1553 // Comment copied form templateTable_i486.cpp
1554 // ----------------------------------------------------------------------------
1555 // Volatile variables demand their effects be made known to all CPU's in
1556 // order. Store buffers on most chips allow reads & writes to reorder; the
1557 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1558 // memory barrier (i.e., it's not sufficient that the interpreter does not
1559 // reorder volatile references, the hardware also must not reorder them).
1560 //
1561 // According to the new Java Memory Model (JMM):
1562 // (1) All volatiles are serialized wrt to each other.
1563 // ALSO reads & writes act as acquire & release, so:
1564 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1565 // the read float up to before the read. It's OK for non-volatile memory refs
1566 // that happen before the volatile read to float down below it.
1567 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1568 // that happen BEFORE the write float down to after the write. It's OK for
1569 // non-volatile memory refs that happen after the volatile write to float up
1570 // before it.
1571 //
1572 // We only put in barriers around volatile refs (they are expensive), not
1573 // _between_ memory refs (that would require us to track the flavor of the
1574 // previous memory refs). Requirements (2) and (3) require some barriers
1575 // before volatile stores and after volatile loads. These nearly cover
1576 // requirement (1) but miss the volatile-store-volatile-load case. This final
1577 // case is placed after volatile-stores although it could just as well go
1578 // before volatile-loads.
1579
1580
1581 void LIRGenerator::do_StoreField(StoreField* x) {
1582 ciField* field = x->field();
1583 bool needs_patching = x->needs_patching();
1584 bool is_volatile = field->is_volatile();
1585 BasicType field_type = x->field_type();
1586
1587 CodeEmitInfo* info = nullptr;
1588 if (needs_patching) {
1589 assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1590 info = state_for(x, x->state_before());
1591 } else if (x->needs_null_check()) {
1592 NullCheck* nc = x->explicit_null_check();
1593 if (nc == nullptr) {
1594 info = state_for(x);
1595 } else {
1596 info = state_for(nc);
1597 }
1598 }
1599
1600 LIRItem object(x->obj(), this);
1601 LIRItem value(x->value(), this);
1602
1603 object.load_item();
1604
1605 if (field->is_flat()) {
1606 value.load_item();
1607 } else {
1608 if (is_volatile || needs_patching) {
1609 // load item if field is volatile (fewer special cases for volatiles)
1610 // load item if field not initialized
1611 // load item if field not constant
1612 // because of code patching we cannot inline constants
1613 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1614 value.load_byte_item();
1615 } else {
1616 value.load_item();
1617 }
1618 } else {
1619 value.load_for_store(field_type);
1620 }
1621 }
1622
1623 set_no_result(x);
1624
1625 #ifndef PRODUCT
1626 if (PrintNotLoaded && needs_patching) {
1627 tty->print_cr(" ###class not loaded at store_%s bci %d",
1628 x->is_static() ? "static" : "field", x->printable_bci());
1629 }
1630 #endif
1631
1632 if (x->needs_null_check() &&
1633 (needs_patching ||
1634 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1635 // Emit an explicit null check because the offset is too large.
1636 // If the class is not loaded and the object is null, we need to deoptimize to throw a
1637 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1638 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1639 }
1640
1641 DecoratorSet decorators = IN_HEAP;
1642 if (is_volatile) {
1643 decorators |= MO_SEQ_CST;
1644 }
1645 if (needs_patching) {
1646 decorators |= C1_NEEDS_PATCHING;
1647 }
1648
1649 if (field->is_flat()) {
1650 ciInlineKlass* vk = field->type()->as_inline_klass();
1651
1652 #ifdef ASSERT
1653 assert(field->is_atomic(), "No atomic access required %s.%s", field->holder()->name()->as_utf8(), field->name()->as_utf8());
1654 // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
1655 assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
1656 #endif
1657
1658 // Zero the payload
1659 BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
1660 LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
1661 LIR_Opr zero = (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0);
1662 __ move(zero, payload);
1663
1664 bool is_constant_null = value.is_constant() && value.value()->is_null_obj();
1665 if (!is_constant_null) {
1666 LabelObj* L_isNull = new LabelObj();
1667 bool needs_null_check = !value.is_constant() || value.value()->is_null_obj();
1668 if (needs_null_check) {
1669 __ cmp(lir_cond_equal, value.result(), LIR_OprFact::oopConst(nullptr));
1670 __ branch(lir_cond_equal, L_isNull->label());
1671 }
1672 // Load payload (if not empty) and set null marker (if not null-free)
1673 if (!vk->is_empty()) {
1674 access_load_at(decorators, bt, value, LIR_OprFact::intConst(vk->payload_offset()), payload);
1675 }
1676 if (!field->is_null_free()) {
1677 __ logical_or(payload, null_marker_mask(bt, field), payload);
1678 }
1679 if (needs_null_check) {
1680 __ branch_destination(L_isNull->label());
1681 }
1682 }
1683 access_store_at(decorators, bt, object, LIR_OprFact::intConst(x->offset()), payload,
1684 // Make sure to emit an implicit null check and pass the information
1685 // that this is a flat store that might require gc barriers for oop fields.
1686 info != nullptr ? new CodeEmitInfo(info) : nullptr, info, vk);
1687 return;
1688 }
1689
1690 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1691 value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1692 }
1693
1694 // FIXME -- I can't find any other way to pass an address to access_load_at().
1695 class TempResolvedAddress: public Instruction {
1696 public:
1697 TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1698 set_operand(addr);
1699 }
1700 virtual void input_values_do(ValueVisitor*) {}
1701 virtual void visit(InstructionVisitor* v) {}
1702 virtual const char* name() const { return "TempResolvedAddress"; }
1703 };
1704
1705 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1706 ciType* array_type = array.value()->declared_type();
1707 ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1708 assert(flat_array_klass->is_loaded(), "must be");
1709
1710 int array_header_size = flat_array_klass->array_header_in_bytes();
1711 int shift = flat_array_klass->log2_element_size();
1712
1713 #ifndef _LP64
1714 LIR_Opr index_op = new_register(T_INT);
1715 // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
1716 // the top (shift+1) bits of index_op must be zero, or
1717 // else throw ArrayIndexOutOfBoundsException
1718 if (index.result()->is_constant()) {
1719 jint const_index = index.result()->as_jint();
1720 __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1721 } else {
1722 __ shift_left(index_op, shift, index.result());
1723 }
1724 #else
1725 LIR_Opr index_op = new_register(T_LONG);
1726 if (index.result()->is_constant()) {
1727 jint const_index = index.result()->as_jint();
1728 __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1729 } else {
1730 __ convert(Bytecodes::_i2l, index.result(), index_op);
1731 // Need to shift manually, as LIR_Address can scale only up to 3.
1732 __ shift_left(index_op, shift, index_op);
1733 }
1734 #endif
1735
1736 LIR_Opr elm_op = new_pointer_register();
1737 LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1738 __ leal(LIR_OprFact::address(elm_address), elm_op);
1739 return elm_op;
1740 }
1741
1742 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, size_t sub_offset) {
1743 assert(field != nullptr, "Need a subelement type specified");
1744
1745 // Find the starting address of the source (inside the array)
1746 LIR_Opr elm_op = get_and_load_element_address(array, index);
1747
1748 BasicType subelt_type = field->type()->basic_type();
1749 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
1750 LIRItem elm_item(elm_resolved_addr, this);
1751
1752 DecoratorSet decorators = IN_HEAP;
1753 access_load_at(decorators, subelt_type,
1754 elm_item, LIR_OprFact::longConst(sub_offset), result,
1755 nullptr, nullptr);
1756 }
1757
1758 void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1759 ciField* field, size_t sub_offset) {
1760 assert(sub_offset == 0 || field != nullptr, "Sanity check");
1761
1762 // Find the starting address of the source (inside the array)
1763 LIR_Opr elm_op = get_and_load_element_address(array, index);
1764
1765 ciInlineKlass* elem_klass = nullptr;
1766 if (field != nullptr) {
1767 elem_klass = field->type()->as_inline_klass();
1768 } else {
1769 elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
1770 }
1771 for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1772 ciField* inner_field = elem_klass->nonstatic_field_at(i);
1773 assert(!inner_field->is_flat(), "flat fields must have been expanded");
1774 int obj_offset = inner_field->offset_in_bytes();
1775 size_t elm_offset = obj_offset - elem_klass->payload_offset() + sub_offset; // object header is not stored in array.
1776 BasicType field_type = inner_field->type()->basic_type();
1777
1778 // Types which are smaller than int are still passed in an int register.
1779 BasicType reg_type = field_type;
1780 switch (reg_type) {
1781 case T_BYTE:
1782 case T_BOOLEAN:
1783 case T_SHORT:
1784 case T_CHAR:
1785 reg_type = T_INT;
1786 break;
1787 default:
1788 break;
1789 }
1790
1791 LIR_Opr temp = new_register(reg_type);
1792 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1793 LIRItem elm_item(elm_resolved_addr, this);
1794
1795 DecoratorSet decorators = IN_HEAP;
1796 if (is_load) {
1797 access_load_at(decorators, field_type,
1798 elm_item, LIR_OprFact::longConst(elm_offset), temp,
1799 nullptr, nullptr);
1800 access_store_at(decorators, field_type,
1801 obj_item, LIR_OprFact::intConst(obj_offset), temp,
1802 nullptr, nullptr);
1803 } else {
1804 access_load_at(decorators, field_type,
1805 obj_item, LIR_OprFact::intConst(obj_offset), temp,
1806 nullptr, nullptr);
1807 access_store_at(decorators, field_type,
1808 elm_item, LIR_OprFact::longConst(elm_offset), temp,
1809 nullptr, nullptr);
1810 }
1811 }
1812 }
1813
1814 void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1815 LIR_Opr tmp = new_register(T_METADATA);
1816 __ check_flat_array(array, value, tmp, slow_path);
1817 }
1818
1819 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1820 LabelObj* L_end = new LabelObj();
1821 LIR_Opr tmp = new_register(T_METADATA);
1822 __ check_null_free_array(array.result(), tmp);
1823 __ branch(lir_cond_equal, L_end->label());
1824 __ null_check(value.result(), info);
1825 __ branch_destination(L_end->label());
1826 }
1827
1828 bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
1829 if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
1830 ciType* type = x->value()->declared_type();
1831 if (type != nullptr && type->is_klass()) {
1832 ciKlass* klass = type->as_klass();
1833 if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->maybe_flat_in_array())) {
1834 // This is known to be a non-flat object. If the array is a flat array,
1835 // it will be caught by the code generated by array_store_check().
1836 return false;
1837 }
1838 }
1839 // We're not 100% sure, so let's do the flat_array_store_check.
1840 return true;
1841 }
1842 return false;
1843 }
1844
1845 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1846 return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1847 }
1848
1849 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1850 assert(x->is_pinned(),"");
1851 assert(x->elt_type() != T_ARRAY, "never used");
1852 bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
1853 bool needs_range_check = x->compute_needs_range_check();
1854 bool use_length = x->length() != nullptr;
1855 bool obj_store = is_reference_type(x->elt_type());
1856 bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
1857 (x->value()->as_Constant() == nullptr ||
1858 !get_jobject_constant(x->value())->is_null_object());
1859
1860 LIRItem array(x->array(), this);
1861 LIRItem index(x->index(), this);
1862 LIRItem value(x->value(), this);
1863 LIRItem length(this);
1864
1865 array.load_item();
1866 index.load_nonconstant();
1867
1868 if (use_length && needs_range_check) {
1869 length.set_instruction(x->length());
1870 length.load_item();
1871 }
1872
1873 if (needs_store_check || x->check_boolean()
1874 || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
1875 value.load_item();
1876 } else {
1877 value.load_for_store(x->elt_type());
1878 }
1879
1880 set_no_result(x);
1881
1882 // the CodeEmitInfo must be duplicated for each different
1883 // LIR-instruction because spilling can occur anywhere between two
1884 // instructions and so the debug information must be different
1885 CodeEmitInfo* range_check_info = state_for(x);
1886 CodeEmitInfo* null_check_info = nullptr;
1887 if (x->needs_null_check()) {
1888 null_check_info = new CodeEmitInfo(range_check_info);
1889 }
1890
1891 if (needs_range_check) {
1892 if (use_length) {
1893 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1894 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1895 } else {
1896 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1897 // range_check also does the null check
1898 null_check_info = nullptr;
1899 }
1900 }
1901
1902 if (GenerateArrayStoreCheck && needs_store_check) {
1903 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1904 array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1905 }
1906
1907 if (x->should_profile()) {
1908 if (is_loaded_flat_array) {
1909 // No need to profile a store to a flat array of known type. This can happen if
1910 // the type only became known after optimizations (for example, after the PhiSimplifier).
1911 x->set_should_profile(false);
1912 } else {
1913 int bci = x->profiled_bci();
1914 ciMethodData* md = x->profiled_method()->method_data();
1915 assert(md != nullptr, "Sanity");
1916 ciProfileData* data = md->bci_to_data(bci);
1917 assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
1918 ciArrayStoreData* store_data = (ciArrayStoreData*)data;
1919 profile_array_type(x, md, store_data);
1920 assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
1921 if (x->array()->maybe_null_free_array()) {
1922 profile_null_free_array(array, md, data);
1923 }
1924 }
1925 }
1926
1927 if (is_loaded_flat_array) {
1928 // TODO 8350865 This is currently dead code
1929 if (!x->value()->is_null_free()) {
1930 __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1931 }
1932 // If array element is an empty inline type, no need to copy anything
1933 if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
1934 access_flat_array(false, array, index, value);
1935 }
1936 } else {
1937 StoreFlattenedArrayStub* slow_path = nullptr;
1938
1939 if (needs_flat_array_store_check(x)) {
1940 // Check if we indeed have a flat array
1941 index.load_item();
1942 slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1943 check_flat_array(array.result(), value.result(), slow_path);
1944 set_in_conditional_code(true);
1945 } else if (needs_null_free_array_store_check(x)) {
1946 CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1947 check_null_free_array(array, value, info);
1948 }
1949
1950 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1951 if (x->check_boolean()) {
1952 decorators |= C1_MASK_BOOLEAN;
1953 }
1954
1955 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), nullptr, null_check_info);
1956 if (slow_path != nullptr) {
1957 __ branch_destination(slow_path->continuation());
1958 set_in_conditional_code(false);
1959 }
1960 }
1961 }
1962
1963 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1964 LIRItem& base, LIR_Opr offset, LIR_Opr result,
1965 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1966 decorators |= ACCESS_READ;
1967 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1968 if (access.is_raw()) {
1969 _barrier_set->BarrierSetC1::load_at(access, result);
1970 } else {
1971 _barrier_set->load_at(access, result);
1972 }
1973 }
1974
1975 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1976 LIR_Opr addr, LIR_Opr result) {
1977 decorators |= ACCESS_READ;
1978 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1979 access.set_resolved_addr(addr);
1980 if (access.is_raw()) {
1981 _barrier_set->BarrierSetC1::load(access, result);
1982 } else {
1983 _barrier_set->load(access, result);
1984 }
1985 }
1986
1987 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1988 LIRItem& base, LIR_Opr offset, LIR_Opr value,
1989 CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info,
1990 ciInlineKlass* vk) {
1991 decorators |= ACCESS_WRITE;
1992 LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info, vk);
1993 if (access.is_raw()) {
1994 _barrier_set->BarrierSetC1::store_at(access, value);
1995 } else {
1996 _barrier_set->store_at(access, value);
1997 }
1998 }
1999
2000 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
2001 LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
2002 decorators |= ACCESS_READ;
2003 decorators |= ACCESS_WRITE;
2004 // Atomic operations are SEQ_CST by default
2005 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2006 LIRAccess access(this, decorators, base, offset, type);
2007 if (access.is_raw()) {
2008 return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
2009 } else {
2010 return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
2011 }
2012 }
2013
2014 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
2015 LIRItem& base, LIRItem& offset, LIRItem& value) {
2016 decorators |= ACCESS_READ;
2017 decorators |= ACCESS_WRITE;
2018 // Atomic operations are SEQ_CST by default
2019 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2020 LIRAccess access(this, decorators, base, offset, type);
2021 if (access.is_raw()) {
2022 return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
2023 } else {
2024 return _barrier_set->atomic_xchg_at(access, value);
2025 }
2026 }
2027
2028 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
2029 LIRItem& base, LIRItem& offset, LIRItem& value) {
2030 decorators |= ACCESS_READ;
2031 decorators |= ACCESS_WRITE;
2032 // Atomic operations are SEQ_CST by default
2033 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2034 LIRAccess access(this, decorators, base, offset, type);
2035 if (access.is_raw()) {
2036 return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
2037 } else {
2038 return _barrier_set->atomic_add_at(access, value);
2039 }
2040 }
2041
2042 void LIRGenerator::do_LoadField(LoadField* x) {
2043 ciField* field = x->field();
2044 bool needs_patching = x->needs_patching();
2045 bool is_volatile = field->is_volatile();
2046 BasicType field_type = x->field_type();
2047
2048 CodeEmitInfo* info = nullptr;
2049 if (needs_patching) {
2050 assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
2051 info = state_for(x, x->state_before());
2052 } else if (x->needs_null_check()) {
2053 NullCheck* nc = x->explicit_null_check();
2054 if (nc == nullptr) {
2055 info = state_for(x);
2056 } else {
2057 info = state_for(nc);
2058 }
2059 }
2060
2061 LIRItem object(x->obj(), this);
2062
2063 object.load_item();
2064
2065 #ifndef PRODUCT
2066 if (PrintNotLoaded && needs_patching) {
2067 tty->print_cr(" ###class not loaded at load_%s bci %d",
2068 x->is_static() ? "static" : "field", x->printable_bci());
2069 }
2070 #endif
2071
2072 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
2073 if (x->needs_null_check() &&
2074 (needs_patching ||
2075 MacroAssembler::needs_explicit_null_check(x->offset()) ||
2076 stress_deopt)) {
2077 LIR_Opr obj = object.result();
2078 if (stress_deopt) {
2079 obj = new_register(T_OBJECT);
2080 __ move(LIR_OprFact::oopConst(nullptr), obj);
2081 }
2082 // Emit an explicit null check because the offset is too large.
2083 // If the class is not loaded and the object is null, we need to deoptimize to throw a
2084 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2085 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2086 }
2087
2088 DecoratorSet decorators = IN_HEAP;
2089 if (is_volatile) {
2090 decorators |= MO_SEQ_CST;
2091 }
2092 if (needs_patching) {
2093 decorators |= C1_NEEDS_PATCHING;
2094 }
2095
2096 if (field->is_flat()) {
2097 ciInlineKlass* vk = field->type()->as_inline_klass();
2098 #ifdef ASSERT
2099 assert(field->is_atomic(), "No atomic access required");
2100 assert(x->state_before() != nullptr, "Needs state before");
2101 #endif
2102
2103 // Allocate buffer (we can't easily do this conditionally on the null check below
2104 // because branches added in the LIR are opaque to the register allocator).
2105 NewInstance* buffer = new NewInstance(vk, x->state_before(), false, true);
2106 do_NewInstance(buffer);
2107 LIRItem dest(buffer, this);
2108
2109 // Copy the payload to the buffer
2110 BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
2111 LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
2112 access_load_at(decorators, bt, object, LIR_OprFact::intConst(field->offset_in_bytes()), payload,
2113 // Make sure to emit an implicit null check
2114 info ? new CodeEmitInfo(info) : nullptr, info);
2115 access_store_at(decorators, bt, dest, LIR_OprFact::intConst(vk->payload_offset()), payload);
2116
2117 if (field->is_null_free()) {
2118 set_result(x, buffer->operand());
2119 } else {
2120 // Check the null marker and set result to null if it's not set
2121 __ logical_and(payload, null_marker_mask(bt, field), payload);
2122 __ cmp(lir_cond_equal, payload, (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0));
2123 __ cmove(lir_cond_equal, LIR_OprFact::oopConst(nullptr), buffer->operand(), rlock_result(x), T_OBJECT);
2124 }
2125
2126 // Ensure the copy is visible before any subsequent store that publishes the buffer.
2127 __ membar_storestore();
2128 return;
2129 }
2130
2131 LIR_Opr result = rlock_result(x, field_type);
2132 access_load_at(decorators, field_type,
2133 object, LIR_OprFact::intConst(x->offset()), result,
2134 info ? new CodeEmitInfo(info) : nullptr, info);
2135 }
2136
2137 // int/long jdk.internal.util.Preconditions.checkIndex
2138 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
2139 assert(x->number_of_arguments() == 3, "wrong type");
2140 LIRItem index(x->argument_at(0), this);
2141 LIRItem length(x->argument_at(1), this);
2142 LIRItem oobef(x->argument_at(2), this);
2143
2144 index.load_item();
2145 length.load_item();
2146 oobef.load_item();
2147
2148 LIR_Opr result = rlock_result(x);
2149 // x->state() is created from copy_state_for_exception, it does not contains arguments
2150 // we should prepare them before entering into interpreter mode due to deoptimization.
2151 ValueStack* state = x->state();
2152 for (int i = 0; i < x->number_of_arguments(); i++) {
2153 Value arg = x->argument_at(i);
2154 state->push(arg->type(), arg);
2155 }
2156 CodeEmitInfo* info = state_for(x, state);
2157
2158 LIR_Opr len = length.result();
2159 LIR_Opr zero;
2160 if (type == T_INT) {
2161 zero = LIR_OprFact::intConst(0);
2162 if (length.result()->is_constant()){
2163 len = LIR_OprFact::intConst(length.result()->as_jint());
2164 }
2165 } else {
2166 assert(type == T_LONG, "sanity check");
2167 zero = LIR_OprFact::longConst(0);
2168 if (length.result()->is_constant()){
2169 len = LIR_OprFact::longConst(length.result()->as_jlong());
2170 }
2171 }
2172 // C1 can not handle the case that comparing index with constant value while condition
2173 // is neither lir_cond_equal nor lir_cond_notEqual, see LIR_Assembler::comp_op.
2174 LIR_Opr zero_reg = new_register(type);
2175 __ move(zero, zero_reg);
2176 #if defined(X86) && !defined(_LP64)
2177 // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
2178 LIR_Opr index_copy = new_register(index.type());
2179 // index >= 0
2180 __ move(index.result(), index_copy);
2181 __ cmp(lir_cond_less, index_copy, zero_reg);
2182 __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2183 Deoptimization::Action_make_not_entrant));
2184 // index < length
2185 __ move(index.result(), index_copy);
2186 __ cmp(lir_cond_greaterEqual, index_copy, len);
2187 __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2188 Deoptimization::Action_make_not_entrant));
2189 #else
2190 // index >= 0
2191 __ cmp(lir_cond_less, index.result(), zero_reg);
2192 __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2193 Deoptimization::Action_make_not_entrant));
2194 // index < length
2195 __ cmp(lir_cond_greaterEqual, index.result(), len);
2196 __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2197 Deoptimization::Action_make_not_entrant));
2198 #endif
2199 __ move(index.result(), result);
2200 }
2201
2202 //------------------------array access--------------------------------------
2203
2204
2205 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
2206 LIRItem array(x->array(), this);
2207 array.load_item();
2208 LIR_Opr reg = rlock_result(x);
2209
2210 CodeEmitInfo* info = nullptr;
2211 if (x->needs_null_check()) {
2212 NullCheck* nc = x->explicit_null_check();
2213 if (nc == nullptr) {
2214 info = state_for(x);
2215 } else {
2216 info = state_for(nc);
2217 }
2218 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
2219 LIR_Opr obj = new_register(T_OBJECT);
2220 __ move(LIR_OprFact::oopConst(nullptr), obj);
2221 __ null_check(obj, new CodeEmitInfo(info));
2222 }
2223 }
2224 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
2225 }
2226
2227
2228 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
2229 bool use_length = x->length() != nullptr;
2230 LIRItem array(x->array(), this);
2231 LIRItem index(x->index(), this);
2232 LIRItem length(this);
2233 bool needs_range_check = x->compute_needs_range_check();
2234
2235 if (use_length && needs_range_check) {
2236 length.set_instruction(x->length());
2237 length.load_item();
2238 }
2239
2240 array.load_item();
2241 if (index.is_constant() && can_inline_as_constant(x->index())) {
2242 // let it be a constant
2243 index.dont_load_item();
2244 } else {
2245 index.load_item();
2246 }
2247
2248 CodeEmitInfo* range_check_info = state_for(x);
2249 CodeEmitInfo* null_check_info = nullptr;
2250 if (x->needs_null_check()) {
2251 NullCheck* nc = x->explicit_null_check();
2252 if (nc != nullptr) {
2253 null_check_info = state_for(nc);
2254 } else {
2255 null_check_info = range_check_info;
2256 }
2257 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2258 LIR_Opr obj = new_register(T_OBJECT);
2259 __ move(LIR_OprFact::oopConst(nullptr), obj);
2260 __ null_check(obj, new CodeEmitInfo(null_check_info));
2261 }
2262 }
2263
2264 if (needs_range_check) {
2265 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2266 __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2267 } else if (use_length) {
2268 // TODO: use a (modified) version of array_range_check that does not require a
2269 // constant length to be loaded to a register
2270 __ cmp(lir_cond_belowEqual, length.result(), index.result());
2271 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2272 } else {
2273 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2274 // The range check performs the null check, so clear it out for the load
2275 null_check_info = nullptr;
2276 }
2277 }
2278
2279 ciMethodData* md = nullptr;
2280 ciProfileData* data = nullptr;
2281 if (x->should_profile()) {
2282 if (x->array()->is_loaded_flat_array()) {
2283 // No need to profile a load from a flat array of known type. This can happen if
2284 // the type only became known after optimizations (for example, after the PhiSimplifier).
2285 x->set_should_profile(false);
2286 } else {
2287 int bci = x->profiled_bci();
2288 md = x->profiled_method()->method_data();
2289 assert(md != nullptr, "Sanity");
2290 data = md->bci_to_data(bci);
2291 assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
2292 ciArrayLoadData* load_data = (ciArrayLoadData*)data;
2293 profile_array_type(x, md, load_data);
2294 }
2295 }
2296
2297 Value element = nullptr;
2298 if (x->vt() != nullptr) {
2299 assert(x->array()->is_loaded_flat_array(), "must be");
2300 // Find the destination address (of the NewInlineTypeInstance).
2301 LIRItem obj_item(x->vt(), this);
2302
2303 access_flat_array(true, array, index, obj_item,
2304 x->delayed() == nullptr ? 0 : x->delayed()->field(),
2305 x->delayed() == nullptr ? 0 : x->delayed()->offset());
2306 set_no_result(x);
2307 } else if (x->delayed() != nullptr) {
2308 assert(x->array()->is_loaded_flat_array(), "must be");
2309 LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
2310 access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
2311 } else {
2312 LIR_Opr result = rlock_result(x, x->elt_type());
2313 LoadFlattenedArrayStub* slow_path = nullptr;
2314
2315 if (x->should_profile() && x->array()->maybe_null_free_array()) {
2316 profile_null_free_array(array, md, data);
2317 }
2318
2319 if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
2320 assert(x->delayed() == nullptr, "Delayed LoadIndexed only apply to loaded_flat_arrays");
2321 index.load_item();
2322 // if we are loading from a flat array, load it using a runtime call
2323 slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
2324 check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
2325 set_in_conditional_code(true);
2326 }
2327
2328 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2329 access_load_at(decorators, x->elt_type(),
2330 array, index.result(), result,
2331 nullptr, null_check_info);
2332
2333 if (slow_path != nullptr) {
2334 __ branch_destination(slow_path->continuation());
2335 set_in_conditional_code(false);
2336 }
2337
2338 element = x;
2339 }
2340
2341 if (x->should_profile()) {
2342 profile_element_type(element, md, (ciArrayLoadData*)data);
2343 }
2344 }
2345
2346
2347 void LIRGenerator::do_NullCheck(NullCheck* x) {
2348 if (x->can_trap()) {
2349 LIRItem value(x->obj(), this);
2350 value.load_item();
2351 CodeEmitInfo* info = state_for(x);
2352 __ null_check(value.result(), info);
2353 }
2354 }
2355
2356
2357 void LIRGenerator::do_TypeCast(TypeCast* x) {
2358 LIRItem value(x->obj(), this);
2359 value.load_item();
2360 // the result is the same as from the node we are casting
2361 set_result(x, value.result());
2362 }
2363
2364
2365 void LIRGenerator::do_Throw(Throw* x) {
2366 LIRItem exception(x->exception(), this);
2367 exception.load_item();
2368 set_no_result(x);
2369 LIR_Opr exception_opr = exception.result();
2370 CodeEmitInfo* info = state_for(x, x->state());
2371
2372 #ifndef PRODUCT
2373 if (PrintC1Statistics) {
2374 increment_counter(Runtime1::throw_count_address(), T_INT);
2375 }
2376 #endif
2377
2378 // check if the instruction has an xhandler in any of the nested scopes
2379 bool unwind = false;
2380 if (info->exception_handlers()->length() == 0) {
2381 // this throw is not inside an xhandler
2382 unwind = true;
2383 } else {
2384 // get some idea of the throw type
2385 bool type_is_exact = true;
2386 ciType* throw_type = x->exception()->exact_type();
2387 if (throw_type == nullptr) {
2388 type_is_exact = false;
2389 throw_type = x->exception()->declared_type();
2390 }
2391 if (throw_type != nullptr && throw_type->is_instance_klass()) {
2392 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2393 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2394 }
2395 }
2396
2397 // do null check before moving exception oop into fixed register
2398 // to avoid a fixed interval with an oop during the null check.
2399 // Use a copy of the CodeEmitInfo because debug information is
2400 // different for null_check and throw.
2401 if (x->exception()->as_NewInstance() == nullptr && x->exception()->as_ExceptionObject() == nullptr) {
2402 // if the exception object wasn't created using new then it might be null.
2403 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2404 }
2405
2406 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2407 // we need to go through the exception lookup path to get JVMTI
2408 // notification done
2409 unwind = false;
2410 }
2411
2412 // move exception oop into fixed register
2413 __ move(exception_opr, exceptionOopOpr());
2414
2415 if (unwind) {
2416 __ unwind_exception(exceptionOopOpr());
2417 } else {
2418 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2419 }
2420 }
2421
2422
2423 void LIRGenerator::do_UnsafeGet(UnsafeGet* x) {
2424 BasicType type = x->basic_type();
2425 LIRItem src(x->object(), this);
2426 LIRItem off(x->offset(), this);
2427
2428 off.load_item();
2429 src.load_item();
2430
2431 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2432
2433 if (x->is_volatile()) {
2434 decorators |= MO_SEQ_CST;
2435 }
2436 if (type == T_BOOLEAN) {
2437 decorators |= C1_MASK_BOOLEAN;
2438 }
2439 if (is_reference_type(type)) {
2440 decorators |= ON_UNKNOWN_OOP_REF;
2441 }
2442
2443 LIR_Opr result = rlock_result(x, type);
2444 if (!x->is_raw()) {
2445 access_load_at(decorators, type, src, off.result(), result);
2446 } else {
2447 // Currently it is only used in GraphBuilder::setup_osr_entry_block.
2448 // It reads the value from [src + offset] directly.
2449 #ifdef _LP64
2450 LIR_Opr offset = new_register(T_LONG);
2451 __ convert(Bytecodes::_i2l, off.result(), offset);
2452 #else
2453 LIR_Opr offset = off.result();
2454 #endif
2455 LIR_Address* addr = new LIR_Address(src.result(), offset, type);
2456 if (is_reference_type(type)) {
2457 __ move_wide(addr, result);
2458 } else {
2459 __ move(addr, result);
2460 }
2461 }
2462 }
2463
2464
2465 void LIRGenerator::do_UnsafePut(UnsafePut* x) {
2466 BasicType type = x->basic_type();
2467 LIRItem src(x->object(), this);
2468 LIRItem off(x->offset(), this);
2469 LIRItem data(x->value(), this);
2470
2471 src.load_item();
2472 if (type == T_BOOLEAN || type == T_BYTE) {
2473 data.load_byte_item();
2474 } else {
2475 data.load_item();
2476 }
2477 off.load_item();
2478
2479 set_no_result(x);
2480
2481 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2482 if (is_reference_type(type)) {
2483 decorators |= ON_UNKNOWN_OOP_REF;
2484 }
2485 if (x->is_volatile()) {
2486 decorators |= MO_SEQ_CST;
2487 }
2488 access_store_at(decorators, type, src, off.result(), data.result());
2489 }
2490
2491 void LIRGenerator::do_UnsafeGetAndSet(UnsafeGetAndSet* x) {
2492 BasicType type = x->basic_type();
2493 LIRItem src(x->object(), this);
2494 LIRItem off(x->offset(), this);
2495 LIRItem value(x->value(), this);
2496
2497 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2498
2499 if (is_reference_type(type)) {
2500 decorators |= ON_UNKNOWN_OOP_REF;
2501 }
2502
2503 LIR_Opr result;
2504 if (x->is_add()) {
2505 result = access_atomic_add_at(decorators, type, src, off, value);
2506 } else {
2507 result = access_atomic_xchg_at(decorators, type, src, off, value);
2508 }
2509 set_result(x, result);
2510 }
2511
2512 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2513 int lng = x->length();
2514
2515 for (int i = 0; i < lng; i++) {
2516 C1SwitchRange* one_range = x->at(i);
2517 int low_key = one_range->low_key();
2518 int high_key = one_range->high_key();
2519 BlockBegin* dest = one_range->sux();
2520 if (low_key == high_key) {
2521 __ cmp(lir_cond_equal, value, low_key);
2522 __ branch(lir_cond_equal, dest);
2523 } else if (high_key - low_key == 1) {
2524 __ cmp(lir_cond_equal, value, low_key);
2525 __ branch(lir_cond_equal, dest);
2526 __ cmp(lir_cond_equal, value, high_key);
2527 __ branch(lir_cond_equal, dest);
2528 } else {
2529 LabelObj* L = new LabelObj();
2530 __ cmp(lir_cond_less, value, low_key);
2531 __ branch(lir_cond_less, L->label());
2532 __ cmp(lir_cond_lessEqual, value, high_key);
2533 __ branch(lir_cond_lessEqual, dest);
2534 __ branch_destination(L->label());
2535 }
2536 }
2537 __ jump(default_sux);
2538 }
2539
2540
2541 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2542 SwitchRangeList* res = new SwitchRangeList();
2543 int len = x->length();
2544 if (len > 0) {
2545 BlockBegin* sux = x->sux_at(0);
2546 int low = x->lo_key();
2547 BlockBegin* default_sux = x->default_sux();
2548 C1SwitchRange* range = new C1SwitchRange(low, sux);
2549 for (int i = 0; i < len; i++) {
2550 int key = low + i;
2551 BlockBegin* new_sux = x->sux_at(i);
2552 if (sux == new_sux) {
2553 // still in same range
2554 range->set_high_key(key);
2555 } else {
2556 // skip tests which explicitly dispatch to the default
2557 if (sux != default_sux) {
2558 res->append(range);
2559 }
2560 range = new C1SwitchRange(key, new_sux);
2561 }
2562 sux = new_sux;
2563 }
2564 if (res->length() == 0 || res->last() != range) res->append(range);
2565 }
2566 return res;
2567 }
2568
2569
2570 // we expect the keys to be sorted by increasing value
2571 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2572 SwitchRangeList* res = new SwitchRangeList();
2573 int len = x->length();
2574 if (len > 0) {
2575 BlockBegin* default_sux = x->default_sux();
2576 int key = x->key_at(0);
2577 BlockBegin* sux = x->sux_at(0);
2578 C1SwitchRange* range = new C1SwitchRange(key, sux);
2579 for (int i = 1; i < len; i++) {
2580 int new_key = x->key_at(i);
2581 BlockBegin* new_sux = x->sux_at(i);
2582 if (key+1 == new_key && sux == new_sux) {
2583 // still in same range
2584 range->set_high_key(new_key);
2585 } else {
2586 // skip tests which explicitly dispatch to the default
2587 if (range->sux() != default_sux) {
2588 res->append(range);
2589 }
2590 range = new C1SwitchRange(new_key, new_sux);
2591 }
2592 key = new_key;
2593 sux = new_sux;
2594 }
2595 if (res->length() == 0 || res->last() != range) res->append(range);
2596 }
2597 return res;
2598 }
2599
2600
2601 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2602 LIRItem tag(x->tag(), this);
2603 tag.load_item();
2604 set_no_result(x);
2605
2606 if (x->is_safepoint()) {
2607 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2608 }
2609
2610 // move values into phi locations
2611 move_to_phi(x->state());
2612
2613 int lo_key = x->lo_key();
2614 int len = x->length();
2615 assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2616 LIR_Opr value = tag.result();
2617
2618 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2619 ciMethod* method = x->state()->scope()->method();
2620 ciMethodData* md = method->method_data_or_null();
2621 assert(md != nullptr, "Sanity");
2622 ciProfileData* data = md->bci_to_data(x->state()->bci());
2623 assert(data != nullptr, "must have profiling data");
2624 assert(data->is_MultiBranchData(), "bad profile data?");
2625 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2626 LIR_Opr md_reg = new_register(T_METADATA);
2627 __ metadata2reg(md->constant_encoding(), md_reg);
2628 LIR_Opr data_offset_reg = new_pointer_register();
2629 LIR_Opr tmp_reg = new_pointer_register();
2630
2631 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2632 for (int i = 0; i < len; i++) {
2633 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2634 __ cmp(lir_cond_equal, value, i + lo_key);
2635 __ move(data_offset_reg, tmp_reg);
2636 __ cmove(lir_cond_equal,
2637 LIR_OprFact::intptrConst(count_offset),
2638 tmp_reg,
2639 data_offset_reg, T_INT);
2640 }
2641
2642 LIR_Opr data_reg = new_pointer_register();
2643 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2644 __ move(data_addr, data_reg);
2645 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2646 __ move(data_reg, data_addr);
2647 }
2648
2649 if (UseTableRanges) {
2650 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2651 } else {
2652 for (int i = 0; i < len; i++) {
2653 __ cmp(lir_cond_equal, value, i + lo_key);
2654 __ branch(lir_cond_equal, x->sux_at(i));
2655 }
2656 __ jump(x->default_sux());
2657 }
2658 }
2659
2660
2661 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2662 LIRItem tag(x->tag(), this);
2663 tag.load_item();
2664 set_no_result(x);
2665
2666 if (x->is_safepoint()) {
2667 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2668 }
2669
2670 // move values into phi locations
2671 move_to_phi(x->state());
2672
2673 LIR_Opr value = tag.result();
2674 int len = x->length();
2675
2676 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2677 ciMethod* method = x->state()->scope()->method();
2678 ciMethodData* md = method->method_data_or_null();
2679 assert(md != nullptr, "Sanity");
2680 ciProfileData* data = md->bci_to_data(x->state()->bci());
2681 assert(data != nullptr, "must have profiling data");
2682 assert(data->is_MultiBranchData(), "bad profile data?");
2683 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2684 LIR_Opr md_reg = new_register(T_METADATA);
2685 __ metadata2reg(md->constant_encoding(), md_reg);
2686 LIR_Opr data_offset_reg = new_pointer_register();
2687 LIR_Opr tmp_reg = new_pointer_register();
2688
2689 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2690 for (int i = 0; i < len; i++) {
2691 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2692 __ cmp(lir_cond_equal, value, x->key_at(i));
2693 __ move(data_offset_reg, tmp_reg);
2694 __ cmove(lir_cond_equal,
2695 LIR_OprFact::intptrConst(count_offset),
2696 tmp_reg,
2697 data_offset_reg, T_INT);
2698 }
2699
2700 LIR_Opr data_reg = new_pointer_register();
2701 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2702 __ move(data_addr, data_reg);
2703 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2704 __ move(data_reg, data_addr);
2705 }
2706
2707 if (UseTableRanges) {
2708 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2709 } else {
2710 int len = x->length();
2711 for (int i = 0; i < len; i++) {
2712 __ cmp(lir_cond_equal, value, x->key_at(i));
2713 __ branch(lir_cond_equal, x->sux_at(i));
2714 }
2715 __ jump(x->default_sux());
2716 }
2717 }
2718
2719
2720 void LIRGenerator::do_Goto(Goto* x) {
2721 set_no_result(x);
2722
2723 if (block()->next()->as_OsrEntry()) {
2724 // need to free up storage used for OSR entry point
2725 LIR_Opr osrBuffer = block()->next()->operand();
2726 BasicTypeList signature;
2727 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2728 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2729 __ move(osrBuffer, cc->args()->at(0));
2730 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2731 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2732 }
2733
2734 if (x->is_safepoint()) {
2735 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2736
2737 // increment backedge counter if needed
2738 CodeEmitInfo* info = state_for(x, state);
2739 increment_backedge_counter(info, x->profiled_bci());
2740 CodeEmitInfo* safepoint_info = state_for(x, state);
2741 __ safepoint(safepoint_poll_register(), safepoint_info);
2742 }
2743
2744 // Gotos can be folded Ifs, handle this case.
2745 if (x->should_profile()) {
2746 ciMethod* method = x->profiled_method();
2747 assert(method != nullptr, "method should be set if branch is profiled");
2748 ciMethodData* md = method->method_data_or_null();
2749 assert(md != nullptr, "Sanity");
2750 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2751 assert(data != nullptr, "must have profiling data");
2752 int offset;
2753 if (x->direction() == Goto::taken) {
2754 assert(data->is_BranchData(), "need BranchData for two-way branches");
2755 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2756 } else if (x->direction() == Goto::not_taken) {
2757 assert(data->is_BranchData(), "need BranchData for two-way branches");
2758 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2759 } else {
2760 assert(data->is_JumpData(), "need JumpData for branches");
2761 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2762 }
2763 LIR_Opr md_reg = new_register(T_METADATA);
2764 __ metadata2reg(md->constant_encoding(), md_reg);
2765
2766 increment_counter(new LIR_Address(md_reg, offset,
2767 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2768 }
2769
2770 // emit phi-instruction move after safepoint since this simplifies
2771 // describing the state as the safepoint.
2772 move_to_phi(x->state());
2773
2774 __ jump(x->default_sux());
2775 }
2776
2777 /**
2778 * Emit profiling code if needed for arguments, parameters, return value types
2779 *
2780 * @param md MDO the code will update at runtime
2781 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2782 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2783 * @param profiled_k current profile
2784 * @param obj IR node for the object to be profiled
2785 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2786 * Set once we find an update to make and use for next ones.
2787 * @param not_null true if we know obj cannot be null
2788 * @param signature_at_call_k signature at call for obj
2789 * @param callee_signature_k signature of callee for obj
2790 * at call and callee signatures differ at method handle call
2791 * @return the only klass we know will ever be seen at this profile point
2792 */
2793 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2794 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2795 ciKlass* callee_signature_k) {
2796 ciKlass* result = nullptr;
2797 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2798 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2799 // known not to be null or null bit already set and already set to
2800 // unknown: nothing we can do to improve profiling
2801 if (!do_null && !do_update) {
2802 return result;
2803 }
2804
2805 ciKlass* exact_klass = nullptr;
2806 Compilation* comp = Compilation::current();
2807 if (do_update) {
2808 // try to find exact type, using CHA if possible, so that loading
2809 // the klass from the object can be avoided
2810 ciType* type = obj->exact_type();
2811 if (type == nullptr) {
2812 type = obj->declared_type();
2813 type = comp->cha_exact_type(type);
2814 }
2815 assert(type == nullptr || type->is_klass(), "type should be class");
2816 exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2817
2818 do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2819 }
2820
2821 if (!do_null && !do_update) {
2822 return result;
2823 }
2824
2825 ciKlass* exact_signature_k = nullptr;
2826 if (do_update && signature_at_call_k != nullptr) {
2827 // Is the type from the signature exact (the only one possible)?
2828 exact_signature_k = signature_at_call_k->exact_klass();
2829 if (exact_signature_k == nullptr) {
2830 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2831 } else {
2832 result = exact_signature_k;
2833 // Known statically. No need to emit any code: prevent
2834 // LIR_Assembler::emit_profile_type() from emitting useless code
2835 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2836 }
2837 // exact_klass and exact_signature_k can be both non null but
2838 // different if exact_klass is loaded after the ciObject for
2839 // exact_signature_k is created.
2840 if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2841 // sometimes the type of the signature is better than the best type
2842 // the compiler has
2843 exact_klass = exact_signature_k;
2844 }
2845 if (callee_signature_k != nullptr &&
2846 callee_signature_k != signature_at_call_k) {
2847 ciKlass* improved_klass = callee_signature_k->exact_klass();
2848 if (improved_klass == nullptr) {
2849 improved_klass = comp->cha_exact_type(callee_signature_k);
2850 }
2851 if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
2852 exact_klass = exact_signature_k;
2853 }
2854 }
2855 do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2856 }
2857
2858 if (exact_klass != nullptr && exact_klass->is_obj_array_klass()) {
2859 if (exact_klass->can_be_inline_array_klass()) {
2860 // Inline type arrays can have additional properties, we need to load the klass
2861 // TODO 8350865 Can we do better here and track the properties?
2862 exact_klass = nullptr;
2863 do_update = true;
2864 } else {
2865 // For a direct pointer comparison, we need the refined array klass pointer
2866 exact_klass = ciObjArrayKlass::make(exact_klass->as_array_klass()->element_klass());
2867 do_update = ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2868 }
2869 }
2870 if (!do_null && !do_update) {
2871 return result;
2872 }
2873
2874 if (mdp == LIR_OprFact::illegalOpr) {
2875 mdp = new_register(T_METADATA);
2876 __ metadata2reg(md->constant_encoding(), mdp);
2877 if (md_base_offset != 0) {
2878 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2879 mdp = new_pointer_register();
2880 __ leal(LIR_OprFact::address(base_type_address), mdp);
2881 }
2882 }
2883 LIRItem value(obj, this);
2884 value.load_item();
2885 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2886 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2887 return result;
2888 }
2889
2890 // profile parameters on entry to the root of the compilation
2891 void LIRGenerator::profile_parameters(Base* x) {
2892 if (compilation()->profile_parameters()) {
2893 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2894 ciMethodData* md = scope()->method()->method_data_or_null();
2895 assert(md != nullptr, "Sanity");
2896
2897 if (md->parameters_type_data() != nullptr) {
2898 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2899 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2900 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2901 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2902 LIR_Opr src = args->at(i);
2903 assert(!src->is_illegal(), "check");
2904 BasicType t = src->type();
2905 if (is_reference_type(t)) {
2906 intptr_t profiled_k = parameters->type(j);
2907 Local* local = x->state()->local_at(java_index)->as_Local();
2908 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2909 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2910 profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2911 // If the profile is known statically set it once for all and do not emit any code
2912 if (exact != nullptr) {
2913 md->set_parameter_type(j, exact);
2914 }
2915 j++;
2916 }
2917 java_index += type2size[t];
2918 }
2919 }
2920 }
2921 }
2922
2923 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
2924 assert(md != nullptr && data != nullptr, "should have been initialized");
2925 LIR_Opr mdp = new_register(T_METADATA);
2926 __ metadata2reg(md->constant_encoding(), mdp);
2927 LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
2928 LIR_Opr flags = new_register(T_INT);
2929 __ move(addr, flags);
2930 LIR_Opr update;
2931 if (condition != lir_cond_always) {
2932 update = new_register(T_INT);
2933 __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
2934 } else {
2935 update = LIR_OprFact::intConst(flag);
2936 }
2937 __ logical_or(flags, update, flags);
2938 __ store(flags, addr);
2939 }
2940
2941 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciProfileData* data) {
2942 assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2943 LabelObj* L_end = new LabelObj();
2944 LIR_Opr tmp = new_register(T_METADATA);
2945 __ check_null_free_array(array.result(), tmp);
2946
2947 profile_flags(md, data, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
2948 }
2949
2950 template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
2951 assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2952 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2953 profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
2954 load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
2955 }
2956
2957 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
2958 assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2959 assert(md != nullptr && load_data != nullptr, "should have been initialized");
2960 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2961 profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
2962 load_data->element()->type(), element, mdp, false, nullptr, nullptr);
2963 }
2964
2965 void LIRGenerator::do_Base(Base* x) {
2966 __ std_entry(LIR_OprFact::illegalOpr);
2967 // Emit moves from physical registers / stack slots to virtual registers
2968 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2969 IRScope* irScope = compilation()->hir()->top_scope();
2970 int java_index = 0;
2971 for (int i = 0; i < args->length(); i++) {
2972 LIR_Opr src = args->at(i);
2973 assert(!src->is_illegal(), "check");
2974 BasicType t = src->type();
2975
2976 // Types which are smaller than int are passed as int, so
2977 // correct the type which passed.
2978 switch (t) {
2979 case T_BYTE:
2980 case T_BOOLEAN:
2981 case T_SHORT:
2982 case T_CHAR:
2983 t = T_INT;
2984 break;
2985 default:
2986 break;
2987 }
2988
2989 LIR_Opr dest = new_register(t);
2990 __ move(src, dest);
2991
2992 // Assign new location to Local instruction for this local
2993 Local* local = x->state()->local_at(java_index)->as_Local();
2994 assert(local != nullptr, "Locals for incoming arguments must have been created");
2995 #ifndef __SOFTFP__
2996 // The java calling convention passes double as long and float as int.
2997 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2998 #endif // __SOFTFP__
2999 local->set_operand(dest);
3000 #ifdef ASSERT
3001 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
3002 #endif
3003 java_index += type2size[t];
3004 }
3005
3006 // Check if we need a membar at the beginning of the java.lang.Object
3007 // constructor to satisfy the memory model for strict fields.
3008 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
3009 __ membar_storestore();
3010 }
3011
3012 if (compilation()->env()->dtrace_method_probes()) {
3013 BasicTypeList signature;
3014 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3015 signature.append(T_METADATA); // Method*
3016 LIR_OprList* args = new LIR_OprList();
3017 args->append(getThreadPointer());
3018 LIR_Opr meth = new_register(T_METADATA);
3019 __ metadata2reg(method()->constant_encoding(), meth);
3020 args->append(meth);
3021 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
3022 }
3023
3024 if (method()->is_synchronized()) {
3025 LIR_Opr obj;
3026 if (method()->is_static()) {
3027 obj = new_register(T_OBJECT);
3028 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3029 } else {
3030 Local* receiver = x->state()->local_at(0)->as_Local();
3031 assert(receiver != nullptr, "must already exist");
3032 obj = receiver->operand();
3033 }
3034 assert(obj->is_valid(), "must be valid");
3035
3036 if (method()->is_synchronized()) {
3037 LIR_Opr lock = syncLockOpr();
3038 __ load_stack_address_monitor(0, lock);
3039
3040 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
3041 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3042
3043 // receiver is guaranteed non-null so don't need CodeEmitInfo
3044 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
3045 }
3046 }
3047 // increment invocation counters if needed
3048 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3049 profile_parameters(x);
3050 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
3051 increment_invocation_counter(info);
3052 }
3053 if (method()->has_scalarized_args()) {
3054 // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3055 // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3056 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
3057 CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3058 __ append(new LIR_Op0(lir_check_orig_pc));
3059 __ branch(lir_cond_notEqual, deopt_stub);
3060 }
3061
3062 // all blocks with a successor must end with an unconditional jump
3063 // to the successor even if they are consecutive
3064 __ jump(x->default_sux());
3065 }
3066
3067
3068 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3069 // construct our frame and model the production of incoming pointer
3070 // to the OSR buffer.
3071 __ osr_entry(LIR_Assembler::osrBufferPointer());
3072 LIR_Opr result = rlock_result(x);
3073 __ move(LIR_Assembler::osrBufferPointer(), result);
3074 }
3075
3076 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3077 if (loc->is_register()) {
3078 param->load_item_force(loc);
3079 } else {
3080 LIR_Address* addr = loc->as_address_ptr();
3081 param->load_for_store(addr->type());
3082 if (addr->type() == T_OBJECT) {
3083 __ move_wide(param->result(), addr);
3084 } else {
3085 __ move(param->result(), addr);
3086 }
3087 }
3088 }
3089
3090 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3091 assert(args->length() == arg_list->length(),
3092 "args=%d, arg_list=%d", args->length(), arg_list->length());
3093 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3094 LIRItem* param = args->at(i);
3095 LIR_Opr loc = arg_list->at(i);
3096 invoke_load_one_argument(param, loc);
3097 }
3098
3099 if (x->has_receiver()) {
3100 LIRItem* receiver = args->at(0);
3101 LIR_Opr loc = arg_list->at(0);
3102 if (loc->is_register()) {
3103 receiver->load_item_force(loc);
3104 } else {
3105 assert(loc->is_address(), "just checking");
3106 receiver->load_for_store(T_OBJECT);
3107 __ move_wide(receiver->result(), loc->as_address_ptr());
3108 }
3109 }
3110 }
3111
3112
3113 // Visits all arguments, returns appropriate items without loading them
3114 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3115 LIRItemList* argument_items = new LIRItemList();
3116 if (x->has_receiver()) {
3117 LIRItem* receiver = new LIRItem(x->receiver(), this);
3118 argument_items->append(receiver);
3119 }
3120 for (int i = 0; i < x->number_of_arguments(); i++) {
3121 LIRItem* param = new LIRItem(x->argument_at(i), this);
3122 argument_items->append(param);
3123 }
3124 return argument_items;
3125 }
3126
3127
3128 // The invoke with receiver has following phases:
3129 // a) traverse and load/lock receiver;
3130 // b) traverse all arguments -> item-array (invoke_visit_argument)
3131 // c) push receiver on stack
3132 // d) load each of the items and push on stack
3133 // e) unlock receiver
3134 // f) move receiver into receiver-register %o0
3135 // g) lock result registers and emit call operation
3136 //
3137 // Before issuing a call, we must spill-save all values on stack
3138 // that are in caller-save register. "spill-save" moves those registers
3139 // either in a free callee-save register or spills them if no free
3140 // callee save register is available.
3141 //
3142 // The problem is where to invoke spill-save.
3143 // - if invoked between e) and f), we may lock callee save
3144 // register in "spill-save" that destroys the receiver register
3145 // before f) is executed
3146 // - if we rearrange f) to be earlier (by loading %o0) it
3147 // may destroy a value on the stack that is currently in %o0
3148 // and is waiting to be spilled
3149 // - if we keep the receiver locked while doing spill-save,
3150 // we cannot spill it as it is spill-locked
3151 //
3152 void LIRGenerator::do_Invoke(Invoke* x) {
3153 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3154
3155 LIR_OprList* arg_list = cc->args();
3156 LIRItemList* args = invoke_visit_arguments(x);
3157 LIR_Opr receiver = LIR_OprFact::illegalOpr;
3158
3159 // setup result register
3160 LIR_Opr result_register = LIR_OprFact::illegalOpr;
3161 if (x->type() != voidType) {
3162 result_register = result_register_for(x->type());
3163 }
3164
3165 CodeEmitInfo* info = state_for(x, x->state());
3166
3167 invoke_load_arguments(x, args, arg_list);
3168
3169 if (x->has_receiver()) {
3170 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3171 receiver = args->at(0)->result();
3172 }
3173
3174 // emit invoke code
3175 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3176
3177 ciMethod* target = x->target();
3178 switch (x->code()) {
3179 case Bytecodes::_invokestatic:
3180 __ call_static(target, result_register,
3181 SharedRuntime::get_resolve_static_call_stub(),
3182 arg_list, info);
3183 break;
3184 case Bytecodes::_invokespecial:
3185 case Bytecodes::_invokevirtual:
3186 case Bytecodes::_invokeinterface:
3187 // for loaded and final (method or class) target we still produce an inline cache,
3188 // in order to be able to call mixed mode
3189 if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
3190 __ call_opt_virtual(target, receiver, result_register,
3191 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3192 arg_list, info);
3193 } else {
3194 __ call_icvirtual(target, receiver, result_register,
3195 SharedRuntime::get_resolve_virtual_call_stub(),
3196 arg_list, info);
3197 }
3198 break;
3199 case Bytecodes::_invokedynamic: {
3200 __ call_dynamic(target, receiver, result_register,
3201 SharedRuntime::get_resolve_static_call_stub(),
3202 arg_list, info);
3203 break;
3204 }
3205 default:
3206 fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
3207 break;
3208 }
3209
3210 if (result_register->is_valid()) {
3211 LIR_Opr result = rlock_result(x);
3212 __ move(result_register, result);
3213 }
3214 }
3215
3216
3217 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3218 assert(x->number_of_arguments() == 1, "wrong type");
3219 LIRItem value (x->argument_at(0), this);
3220 LIR_Opr reg = rlock_result(x);
3221 value.load_item();
3222 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3223 __ move(tmp, reg);
3224 }
3225
3226
3227
3228 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3229 void LIRGenerator::do_IfOp(IfOp* x) {
3230 #ifdef ASSERT
3231 {
3232 ValueTag xtag = x->x()->type()->tag();
3233 ValueTag ttag = x->tval()->type()->tag();
3234 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3235 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3236 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3237 }
3238 #endif
3239
3240 LIRItem left(x->x(), this);
3241 LIRItem right(x->y(), this);
3242 left.load_item();
3243 if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3244 right.dont_load_item();
3245 } else {
3246 // substitutability_check() needs to use right as a base register.
3247 right.load_item();
3248 }
3249
3250 LIRItem t_val(x->tval(), this);
3251 LIRItem f_val(x->fval(), this);
3252 t_val.dont_load_item();
3253 f_val.dont_load_item();
3254
3255 if (x->substitutability_check()) {
3256 substitutability_check(x, left, right, t_val, f_val);
3257 } else {
3258 LIR_Opr reg = rlock_result(x);
3259 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3260 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3261 }
3262 }
3263
3264 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3265 assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3266 bool is_acmpeq = (x->cond() == If::eql);
3267 LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result();
3268 LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3269 LIR_Opr result = rlock_result(x);
3270 CodeEmitInfo* info = state_for(x, x->state_before());
3271
3272 substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3273 }
3274
3275 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3276 LIR_Opr equal_result = LIR_OprFact::intConst(1);
3277 LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3278 LIR_Opr result = new_register(T_INT);
3279 CodeEmitInfo* info = state_for(x, x->state_before());
3280
3281 substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3282
3283 assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3284 __ cmp(lir_cond(x->cond()), result, equal_result);
3285 }
3286
3287 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3288 LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3289 CodeEmitInfo* info) {
3290 LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3291 LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3292 LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
3293 LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
3294
3295 ciKlass* left_klass = left_val ->as_loaded_klass_or_null();
3296 ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3297
3298 if ((left_klass == nullptr || right_klass == nullptr) ||// The klass is still unloaded, or came from a Phi node.
3299 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
3300 init_temps_for_substitutability_check(tmp1, tmp2);
3301 }
3302
3303 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
3304 // No need to load klass -- the operands are statically known to be the same inline klass.
3305 } else {
3306 BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3307 left_klass_op = new_register(t_klass);
3308 right_klass_op = new_register(t_klass);
3309 }
3310
3311 CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3312 __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3313 tmp1, tmp2,
3314 left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
3315 }
3316
3317 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3318 assert(x->number_of_arguments() == 0, "wrong type");
3319 // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3320 BasicTypeList signature;
3321 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3322 LIR_Opr reg = result_register_for(x->type());
3323 __ call_runtime_leaf(routine, getThreadTemp(),
3324 reg, new LIR_OprList());
3325 LIR_Opr result = rlock_result(x);
3326 __ move(reg, result);
3327 }
3328
3329
3330
3331 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3332 switch (x->id()) {
3333 case vmIntrinsics::_intBitsToFloat :
3334 case vmIntrinsics::_doubleToRawLongBits :
3335 case vmIntrinsics::_longBitsToDouble :
3336 case vmIntrinsics::_floatToRawIntBits : {
3337 do_FPIntrinsics(x);
3338 break;
3339 }
3340
3341 #ifdef JFR_HAVE_INTRINSICS
3342 case vmIntrinsics::_counterTime:
3343 do_RuntimeCall(CAST_FROM_FN_PTR(address, JfrTime::time_function()), x);
3344 break;
3345 #endif
3346
3347 case vmIntrinsics::_currentTimeMillis:
3348 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
3349 break;
3350
3351 case vmIntrinsics::_nanoTime:
3352 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
3353 break;
3354
3355 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3356 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3357 case vmIntrinsics::_getClass: do_getClass(x); break;
3358 case vmIntrinsics::_getObjectSize: do_getObjectSize(x); break;
3359 case vmIntrinsics::_currentCarrierThread: do_currentCarrierThread(x); break;
3360 case vmIntrinsics::_currentThread: do_vthread(x); break;
3361 case vmIntrinsics::_scopedValueCache: do_scopedValueCache(x); break;
3362
3363 case vmIntrinsics::_dlog: // fall through
3364 case vmIntrinsics::_dlog10: // fall through
3365 case vmIntrinsics::_dabs: // fall through
3366 case vmIntrinsics::_dsqrt: // fall through
3367 case vmIntrinsics::_dsqrt_strict: // fall through
3368 case vmIntrinsics::_dtan: // fall through
3369 case vmIntrinsics::_dsinh: // fall through
3370 case vmIntrinsics::_dtanh: // fall through
3371 case vmIntrinsics::_dsin : // fall through
3372 case vmIntrinsics::_dcos : // fall through
3373 case vmIntrinsics::_dcbrt : // fall through
3374 case vmIntrinsics::_dexp : // fall through
3375 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3376 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3377
3378 case vmIntrinsics::_fmaD: do_FmaIntrinsic(x); break;
3379 case vmIntrinsics::_fmaF: do_FmaIntrinsic(x); break;
3380
3381 // Use java.lang.Math intrinsics code since it works for these intrinsics too.
3382 case vmIntrinsics::_floatToFloat16: // fall through
3383 case vmIntrinsics::_float16ToFloat: do_MathIntrinsic(x); break;
3384
3385 case vmIntrinsics::_Preconditions_checkIndex:
3386 do_PreconditionsCheckIndex(x, T_INT);
3387 break;
3388 case vmIntrinsics::_Preconditions_checkLongIndex:
3389 do_PreconditionsCheckIndex(x, T_LONG);
3390 break;
3391
3392 case vmIntrinsics::_compareAndSetReference:
3393 do_CompareAndSwap(x, objectType);
3394 break;
3395 case vmIntrinsics::_compareAndSetInt:
3396 do_CompareAndSwap(x, intType);
3397 break;
3398 case vmIntrinsics::_compareAndSetLong:
3399 do_CompareAndSwap(x, longType);
3400 break;
3401
3402 case vmIntrinsics::_loadFence :
3403 __ membar_acquire();
3404 break;
3405 case vmIntrinsics::_storeFence:
3406 __ membar_release();
3407 break;
3408 case vmIntrinsics::_storeStoreFence:
3409 __ membar_storestore();
3410 break;
3411 case vmIntrinsics::_fullFence :
3412 __ membar();
3413 break;
3414 case vmIntrinsics::_onSpinWait:
3415 __ on_spin_wait();
3416 break;
3417 case vmIntrinsics::_Reference_get0:
3418 do_Reference_get0(x);
3419 break;
3420
3421 case vmIntrinsics::_updateCRC32:
3422 case vmIntrinsics::_updateBytesCRC32:
3423 case vmIntrinsics::_updateByteBufferCRC32:
3424 do_update_CRC32(x);
3425 break;
3426
3427 case vmIntrinsics::_updateBytesCRC32C:
3428 case vmIntrinsics::_updateDirectByteBufferCRC32C:
3429 do_update_CRC32C(x);
3430 break;
3431
3432 case vmIntrinsics::_vectorizedMismatch:
3433 do_vectorizedMismatch(x);
3434 break;
3435
3436 case vmIntrinsics::_blackhole:
3437 do_blackhole(x);
3438 break;
3439
3440 default: ShouldNotReachHere(); break;
3441 }
3442 }
3443
3444 void LIRGenerator::profile_arguments(ProfileCall* x) {
3445 if (compilation()->profile_arguments()) {
3446 int bci = x->bci_of_invoke();
3447 ciMethodData* md = x->method()->method_data_or_null();
3448 assert(md != nullptr, "Sanity");
3449 ciProfileData* data = md->bci_to_data(bci);
3450 if (data != nullptr) {
3451 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3452 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3453 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3454 int base_offset = md->byte_offset_of_slot(data, extra);
3455 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3456 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3457
3458 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3459 int start = 0;
3460 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3461 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3462 // first argument is not profiled at call (method handle invoke)
3463 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3464 start = 1;
3465 }
3466 ciSignature* callee_signature = x->callee()->signature();
3467 // method handle call to virtual method
3468 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3469 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : nullptr);
3470
3471 bool ignored_will_link;
3472 ciSignature* signature_at_call = nullptr;
3473 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3474 ciSignatureStream signature_at_call_stream(signature_at_call);
3475
3476 // if called through method handle invoke, some arguments may have been popped
3477 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3478 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3479 ciKlass* exact = profile_type(md, base_offset, off,
3480 args->type(i), x->profiled_arg_at(i+start), mdp,
3481 !x->arg_needs_null_check(i+start),
3482 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3483 if (exact != nullptr) {
3484 md->set_argument_type(bci, i, exact);
3485 }
3486 }
3487 } else {
3488 #ifdef ASSERT
3489 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3490 int n = x->nb_profiled_args();
3491 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3492 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3493 "only at JSR292 bytecodes");
3494 #endif
3495 }
3496 }
3497 }
3498 }
3499
3500 // profile parameters on entry to an inlined method
3501 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3502 if (compilation()->profile_parameters() && x->inlined()) {
3503 ciMethodData* md = x->callee()->method_data_or_null();
3504 if (md != nullptr) {
3505 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3506 if (parameters_type_data != nullptr) {
3507 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3508 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3509 bool has_receiver = !x->callee()->is_static();
3510 ciSignature* sig = x->callee()->signature();
3511 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : nullptr);
3512 int i = 0; // to iterate on the Instructions
3513 Value arg = x->recv();
3514 bool not_null = false;
3515 int bci = x->bci_of_invoke();
3516 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3517 // The first parameter is the receiver so that's what we start
3518 // with if it exists. One exception is method handle call to
3519 // virtual method: the receiver is in the args list
3520 if (arg == nullptr || !Bytecodes::has_receiver(bc)) {
3521 i = 1;
3522 arg = x->profiled_arg_at(0);
3523 not_null = !x->arg_needs_null_check(0);
3524 }
3525 int k = 0; // to iterate on the profile data
3526 for (;;) {
3527 intptr_t profiled_k = parameters->type(k);
3528 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3529 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3530 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), nullptr);
3531 // If the profile is known statically set it once for all and do not emit any code
3532 if (exact != nullptr) {
3533 md->set_parameter_type(k, exact);
3534 }
3535 k++;
3536 if (k >= parameters_type_data->number_of_parameters()) {
3537 #ifdef ASSERT
3538 int extra = 0;
3539 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3540 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3541 x->recv() != nullptr && Bytecodes::has_receiver(bc)) {
3542 extra += 1;
3543 }
3544 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3545 #endif
3546 break;
3547 }
3548 arg = x->profiled_arg_at(i);
3549 not_null = !x->arg_needs_null_check(i);
3550 i++;
3551 }
3552 }
3553 }
3554 }
3555 }
3556
3557 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3558 // Need recv in a temporary register so it interferes with the other temporaries
3559 LIR_Opr recv = LIR_OprFact::illegalOpr;
3560 LIR_Opr mdo = new_register(T_METADATA);
3561 // tmp is used to hold the counters on SPARC
3562 LIR_Opr tmp = new_pointer_register();
3563
3564 if (x->nb_profiled_args() > 0) {
3565 profile_arguments(x);
3566 }
3567
3568 // profile parameters on inlined method entry including receiver
3569 if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3570 profile_parameters_at_call(x);
3571 }
3572
3573 if (x->recv() != nullptr) {
3574 LIRItem value(x->recv(), this);
3575 value.load_item();
3576 recv = new_register(T_OBJECT);
3577 __ move(value.result(), recv);
3578 }
3579 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3580 }
3581
3582 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3583 int bci = x->bci_of_invoke();
3584 ciMethodData* md = x->method()->method_data_or_null();
3585 assert(md != nullptr, "Sanity");
3586 ciProfileData* data = md->bci_to_data(bci);
3587 if (data != nullptr) {
3588 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3589 ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3590 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3591
3592 bool ignored_will_link;
3593 ciSignature* signature_at_call = nullptr;
3594 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3595
3596 // The offset within the MDO of the entry to update may be too large
3597 // to be used in load/store instructions on some platforms. So have
3598 // profile_type() compute the address of the profile in a register.
3599 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3600 ret->type(), x->ret(), mdp,
3601 !x->needs_null_check(),
3602 signature_at_call->return_type()->as_klass(),
3603 x->callee()->signature()->return_type()->as_klass());
3604 if (exact != nullptr) {
3605 md->set_return_type(bci, exact);
3606 }
3607 }
3608 }
3609
3610 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3611 ciKlass* klass = value->as_loaded_klass_or_null();
3612 if (klass != nullptr) {
3613 if (klass->is_inlinetype()) {
3614 profile_flags(md, data, flag, lir_cond_always);
3615 } else if (klass->can_be_inline_klass()) {
3616 return false;
3617 }
3618 } else {
3619 return false;
3620 }
3621 return true;
3622 }
3623
3624
3625 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3626 ciMethod* method = x->method();
3627 assert(method != nullptr, "method should be set if branch is profiled");
3628 ciMethodData* md = method->method_data_or_null();
3629 assert(md != nullptr, "Sanity");
3630 ciProfileData* data = md->bci_to_data(x->bci());
3631 assert(data != nullptr, "must have profiling data");
3632 assert(data->is_ACmpData(), "need BranchData for two-way branches");
3633 ciACmpData* acmp = (ciACmpData*)data;
3634 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3635 profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3636 acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
3637 int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3638 if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3639 LIR_Opr mdp = new_register(T_METADATA);
3640 __ metadata2reg(md->constant_encoding(), mdp);
3641 LIRItem value(x->left(), this);
3642 value.load_item();
3643 __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3644 }
3645 profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3646 in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3647 acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
3648 if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3649 LIR_Opr mdp = new_register(T_METADATA);
3650 __ metadata2reg(md->constant_encoding(), mdp);
3651 LIRItem value(x->right(), this);
3652 value.load_item();
3653 __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3654 }
3655 }
3656
3657 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3658 // We can safely ignore accessors here, since c2 will inline them anyway,
3659 // accessors are also always mature.
3660 if (!x->inlinee()->is_accessor()) {
3661 CodeEmitInfo* info = state_for(x, x->state(), true);
3662 // Notify the runtime very infrequently only to take care of counter overflows
3663 int freq_log = Tier23InlineeNotifyFreqLog;
3664 double scale;
3665 if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3666 freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3667 }
3668 increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3669 }
3670 }
3671
3672 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3673 if (compilation()->is_profiling()) {
3674 #if defined(X86) && !defined(_LP64)
3675 // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3676 LIR_Opr left_copy = new_register(left->type());
3677 __ move(left, left_copy);
3678 __ cmp(cond, left_copy, right);
3679 #else
3680 __ cmp(cond, left, right);
3681 #endif
3682 LIR_Opr step = new_register(T_INT);
3683 LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3684 LIR_Opr zero = LIR_OprFact::intConst(0);
3685 __ cmove(cond,
3686 (left_bci < bci) ? plus_one : zero,
3687 (right_bci < bci) ? plus_one : zero,
3688 step, left->type());
3689 increment_backedge_counter(info, step, bci);
3690 }
3691 }
3692
3693
3694 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3695 int freq_log = 0;
3696 int level = compilation()->env()->comp_level();
3697 if (level == CompLevel_limited_profile) {
3698 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3699 } else if (level == CompLevel_full_profile) {
3700 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3701 } else {
3702 ShouldNotReachHere();
3703 }
3704 // Increment the appropriate invocation/backedge counter and notify the runtime.
3705 double scale;
3706 if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3707 freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3708 }
3709 increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3710 }
3711
3712 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3713 ciMethod *method, LIR_Opr step, int frequency,
3714 int bci, bool backedge, bool notify) {
3715 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3716 int level = _compilation->env()->comp_level();
3717 assert(level > CompLevel_simple, "Shouldn't be here");
3718
3719 int offset = -1;
3720 LIR_Opr counter_holder;
3721 if (level == CompLevel_limited_profile) {
3722 MethodCounters* counters_adr = method->ensure_method_counters();
3723 if (counters_adr == nullptr) {
3724 bailout("method counters allocation failed");
3725 return;
3726 }
3727 counter_holder = new_pointer_register();
3728 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3729 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3730 MethodCounters::invocation_counter_offset());
3731 } else if (level == CompLevel_full_profile) {
3732 counter_holder = new_register(T_METADATA);
3733 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3734 MethodData::invocation_counter_offset());
3735 ciMethodData* md = method->method_data_or_null();
3736 assert(md != nullptr, "Sanity");
3737 __ metadata2reg(md->constant_encoding(), counter_holder);
3738 } else {
3739 ShouldNotReachHere();
3740 }
3741 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3742 LIR_Opr result = new_register(T_INT);
3743 __ load(counter, result);
3744 __ add(result, step, result);
3745 __ store(result, counter);
3746 if (notify && (!backedge || UseOnStackReplacement)) {
3747 LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3748 // The bci for info can point to cmp for if's we want the if bci
3749 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3750 int freq = frequency << InvocationCounter::count_shift;
3751 if (freq == 0) {
3752 if (!step->is_constant()) {
3753 __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3754 __ branch(lir_cond_notEqual, overflow);
3755 } else {
3756 __ branch(lir_cond_always, overflow);
3757 }
3758 } else {
3759 LIR_Opr mask = load_immediate(freq, T_INT);
3760 if (!step->is_constant()) {
3761 // If step is 0, make sure the overflow check below always fails
3762 __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3763 __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3764 }
3765 __ logical_and(result, mask, result);
3766 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3767 __ branch(lir_cond_equal, overflow);
3768 }
3769 __ branch_destination(overflow->continuation());
3770 }
3771 }
3772
3773 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3774 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3775 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3776
3777 if (x->pass_thread()) {
3778 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3779 args->append(getThreadPointer());
3780 }
3781
3782 for (int i = 0; i < x->number_of_arguments(); i++) {
3783 Value a = x->argument_at(i);
3784 LIRItem* item = new LIRItem(a, this);
3785 item->load_item();
3786 args->append(item->result());
3787 signature->append(as_BasicType(a->type()));
3788 }
3789
3790 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), nullptr);
3791 if (x->type() == voidType) {
3792 set_no_result(x);
3793 } else {
3794 __ move(result, rlock_result(x));
3795 }
3796 }
3797
3798 #ifdef ASSERT
3799 void LIRGenerator::do_Assert(Assert *x) {
3800 ValueTag tag = x->x()->type()->tag();
3801 If::Condition cond = x->cond();
3802
3803 LIRItem xitem(x->x(), this);
3804 LIRItem yitem(x->y(), this);
3805 LIRItem* xin = &xitem;
3806 LIRItem* yin = &yitem;
3807
3808 assert(tag == intTag, "Only integer assertions are valid!");
3809
3810 xin->load_item();
3811 yin->dont_load_item();
3812
3813 set_no_result(x);
3814
3815 LIR_Opr left = xin->result();
3816 LIR_Opr right = yin->result();
3817
3818 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3819 }
3820 #endif
3821
3822 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3823
3824
3825 Instruction *a = x->x();
3826 Instruction *b = x->y();
3827 if (!a || StressRangeCheckElimination) {
3828 assert(!b || StressRangeCheckElimination, "B must also be null");
3829
3830 CodeEmitInfo *info = state_for(x, x->state());
3831 CodeStub* stub = new PredicateFailedStub(info);
3832
3833 __ jump(stub);
3834 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3835 int a_int = a->type()->as_IntConstant()->value();
3836 int b_int = b->type()->as_IntConstant()->value();
3837
3838 bool ok = false;
3839
3840 switch(x->cond()) {
3841 case Instruction::eql: ok = (a_int == b_int); break;
3842 case Instruction::neq: ok = (a_int != b_int); break;
3843 case Instruction::lss: ok = (a_int < b_int); break;
3844 case Instruction::leq: ok = (a_int <= b_int); break;
3845 case Instruction::gtr: ok = (a_int > b_int); break;
3846 case Instruction::geq: ok = (a_int >= b_int); break;
3847 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3848 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3849 default: ShouldNotReachHere();
3850 }
3851
3852 if (ok) {
3853
3854 CodeEmitInfo *info = state_for(x, x->state());
3855 CodeStub* stub = new PredicateFailedStub(info);
3856
3857 __ jump(stub);
3858 }
3859 } else {
3860
3861 ValueTag tag = x->x()->type()->tag();
3862 If::Condition cond = x->cond();
3863 LIRItem xitem(x->x(), this);
3864 LIRItem yitem(x->y(), this);
3865 LIRItem* xin = &xitem;
3866 LIRItem* yin = &yitem;
3867
3868 assert(tag == intTag, "Only integer deoptimizations are valid!");
3869
3870 xin->load_item();
3871 yin->dont_load_item();
3872 set_no_result(x);
3873
3874 LIR_Opr left = xin->result();
3875 LIR_Opr right = yin->result();
3876
3877 CodeEmitInfo *info = state_for(x, x->state());
3878 CodeStub* stub = new PredicateFailedStub(info);
3879
3880 __ cmp(lir_cond(cond), left, right);
3881 __ branch(lir_cond(cond), stub);
3882 }
3883 }
3884
3885 void LIRGenerator::do_blackhole(Intrinsic *x) {
3886 assert(!x->has_receiver(), "Should have been checked before: only static methods here");
3887 for (int c = 0; c < x->number_of_arguments(); c++) {
3888 // Load the argument
3889 LIRItem vitem(x->argument_at(c), this);
3890 vitem.load_item();
3891 // ...and leave it unused.
3892 }
3893 }
3894
3895 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3896 LIRItemList args(1);
3897 LIRItem value(arg1, this);
3898 args.append(&value);
3899 BasicTypeList signature;
3900 signature.append(as_BasicType(arg1->type()));
3901
3902 return call_runtime(&signature, &args, entry, result_type, info);
3903 }
3904
3905
3906 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3907 LIRItemList args(2);
3908 LIRItem value1(arg1, this);
3909 LIRItem value2(arg2, this);
3910 args.append(&value1);
3911 args.append(&value2);
3912 BasicTypeList signature;
3913 signature.append(as_BasicType(arg1->type()));
3914 signature.append(as_BasicType(arg2->type()));
3915
3916 return call_runtime(&signature, &args, entry, result_type, info);
3917 }
3918
3919
3920 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3921 address entry, ValueType* result_type, CodeEmitInfo* info) {
3922 // get a result register
3923 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3924 LIR_Opr result = LIR_OprFact::illegalOpr;
3925 if (result_type->tag() != voidTag) {
3926 result = new_register(result_type);
3927 phys_reg = result_register_for(result_type);
3928 }
3929
3930 // move the arguments into the correct location
3931 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3932 assert(cc->length() == args->length(), "argument mismatch");
3933 for (int i = 0; i < args->length(); i++) {
3934 LIR_Opr arg = args->at(i);
3935 LIR_Opr loc = cc->at(i);
3936 if (loc->is_register()) {
3937 __ move(arg, loc);
3938 } else {
3939 LIR_Address* addr = loc->as_address_ptr();
3940 // if (!can_store_as_constant(arg)) {
3941 // LIR_Opr tmp = new_register(arg->type());
3942 // __ move(arg, tmp);
3943 // arg = tmp;
3944 // }
3945 __ move(arg, addr);
3946 }
3947 }
3948
3949 if (info) {
3950 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3951 } else {
3952 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3953 }
3954 if (result->is_valid()) {
3955 __ move(phys_reg, result);
3956 }
3957 return result;
3958 }
3959
3960
3961 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3962 address entry, ValueType* result_type, CodeEmitInfo* info) {
3963 // get a result register
3964 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3965 LIR_Opr result = LIR_OprFact::illegalOpr;
3966 if (result_type->tag() != voidTag) {
3967 result = new_register(result_type);
3968 phys_reg = result_register_for(result_type);
3969 }
3970
3971 // move the arguments into the correct location
3972 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3973
3974 assert(cc->length() == args->length(), "argument mismatch");
3975 for (int i = 0; i < args->length(); i++) {
3976 LIRItem* arg = args->at(i);
3977 LIR_Opr loc = cc->at(i);
3978 if (loc->is_register()) {
3979 arg->load_item_force(loc);
3980 } else {
3981 LIR_Address* addr = loc->as_address_ptr();
3982 arg->load_for_store(addr->type());
3983 __ move(arg->result(), addr);
3984 }
3985 }
3986
3987 if (info) {
3988 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3989 } else {
3990 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3991 }
3992 if (result->is_valid()) {
3993 __ move(phys_reg, result);
3994 }
3995 return result;
3996 }
3997
3998 void LIRGenerator::do_MemBar(MemBar* x) {
3999 LIR_Code code = x->code();
4000 switch(code) {
4001 case lir_membar_acquire : __ membar_acquire(); break;
4002 case lir_membar_release : __ membar_release(); break;
4003 case lir_membar : __ membar(); break;
4004 case lir_membar_loadload : __ membar_loadload(); break;
4005 case lir_membar_storestore: __ membar_storestore(); break;
4006 case lir_membar_loadstore : __ membar_loadstore(); break;
4007 case lir_membar_storeload : __ membar_storeload(); break;
4008 default : ShouldNotReachHere(); break;
4009 }
4010 }
4011
4012 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
4013 LIR_Opr value_fixed = rlock_byte(T_BYTE);
4014 if (two_operand_lir_form) {
4015 __ move(value, value_fixed);
4016 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
4017 } else {
4018 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
4019 }
4020 LIR_Opr klass = new_register(T_METADATA);
4021 load_klass(array, klass, null_check_info);
4022 null_check_info = nullptr;
4023 LIR_Opr layout = new_register(T_INT);
4024 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
4025 int diffbit = Klass::layout_helper_boolean_diffbit();
4026 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
4027 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
4028 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
4029 value = value_fixed;
4030 return value;
4031 }