1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/compressedOops.hpp"
30 #include "opto/ad.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/callnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/matcher.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/movenode.hpp"
37 #include "opto/opcodes.hpp"
38 #include "opto/regmask.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/type.hpp"
42 #include "opto/vectornode.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/align.hpp"
46
47 OptoReg::Name OptoReg::c_frame_pointer;
48
49 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
50 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
51 RegMask Matcher::caller_save_regmask;
52 RegMask Matcher::caller_save_regmask_exclude_soe;
53 RegMask Matcher::STACK_ONLY_mask;
54 RegMask Matcher::c_frame_ptr_mask;
55 const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
56 const uint Matcher::_end_rematerialize = _END_REMATERIALIZE;
57
58 //---------------------------Matcher-------------------------------------------
59 Matcher::Matcher()
60 : PhaseTransform( Phase::Ins_Select ),
61 _states_arena(Chunk::medium_size, mtCompiler, Arena::Tag::tag_states),
62 _new_nodes(C->comp_arena()),
63 _visited(&_states_arena),
64 _shared(&_states_arena),
65 _dontcare(&_states_arena),
66 _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
67 _swallowed(swallowed),
68 _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
69 _end_inst_chain_rule(_END_INST_CHAIN_RULE),
70 _must_clone(must_clone),
71 _shared_nodes(C->comp_arena()),
72 #ifndef PRODUCT
73 _old2new_map(C->comp_arena()),
74 _new2old_map(C->comp_arena()),
75 _reused(C->comp_arena()),
76 #endif // !PRODUCT
77 _allocation_started(false),
78 _ruleName(ruleName),
79 _register_save_policy(register_save_policy),
80 _c_reg_save_policy(c_reg_save_policy),
81 _register_save_type(register_save_type),
82 _return_addr_mask(C->comp_arena()) {
83 C->set_matcher(this);
84
85 idealreg2spillmask [Op_RegI] = nullptr;
86 idealreg2spillmask [Op_RegN] = nullptr;
87 idealreg2spillmask [Op_RegL] = nullptr;
88 idealreg2spillmask [Op_RegF] = nullptr;
89 idealreg2spillmask [Op_RegD] = nullptr;
90 idealreg2spillmask [Op_RegP] = nullptr;
91 idealreg2spillmask [Op_VecA] = nullptr;
92 idealreg2spillmask [Op_VecS] = nullptr;
93 idealreg2spillmask [Op_VecD] = nullptr;
94 idealreg2spillmask [Op_VecX] = nullptr;
95 idealreg2spillmask [Op_VecY] = nullptr;
96 idealreg2spillmask [Op_VecZ] = nullptr;
97 idealreg2spillmask [Op_RegFlags] = nullptr;
98 idealreg2spillmask [Op_RegVectMask] = nullptr;
99
100 idealreg2debugmask [Op_RegI] = nullptr;
101 idealreg2debugmask [Op_RegN] = nullptr;
102 idealreg2debugmask [Op_RegL] = nullptr;
103 idealreg2debugmask [Op_RegF] = nullptr;
104 idealreg2debugmask [Op_RegD] = nullptr;
105 idealreg2debugmask [Op_RegP] = nullptr;
106 idealreg2debugmask [Op_VecA] = nullptr;
107 idealreg2debugmask [Op_VecS] = nullptr;
108 idealreg2debugmask [Op_VecD] = nullptr;
109 idealreg2debugmask [Op_VecX] = nullptr;
110 idealreg2debugmask [Op_VecY] = nullptr;
111 idealreg2debugmask [Op_VecZ] = nullptr;
112 idealreg2debugmask [Op_RegFlags] = nullptr;
113 idealreg2debugmask [Op_RegVectMask] = nullptr;
114
115 DEBUG_ONLY(_mem_node = nullptr;) // Ideal memory node consumed by mach node
116 }
117
118 //------------------------------warp_incoming_stk_arg------------------------
119 // This warps a VMReg into an OptoReg::Name
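// For example, an incoming stack argument in slot 2, with
// out_preserve_stack_slots() == 2, is warped to _old_SP + 2 + 2, i.e. four
// slots above the old SP (and _in_arg_limit is bumped if needed).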
120 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
121 OptoReg::Name warped;
122 if( reg->is_stack() ) { // Stack slot argument?
123 warped = OptoReg::add(_old_SP, reg->reg2stack() );
124 warped = OptoReg::add(warped, C->out_preserve_stack_slots());
125 if( warped >= _in_arg_limit )
126 _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
127 return warped;
128 }
129 return OptoReg::as_OptoReg(reg);
130 }
131
132 //---------------------------compute_old_SP------------------------------------
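// For example, with 5 fixed slots, 4 preserve slots and a 4-slot stack
// alignment, this returns stack2reg(align_up(5 + 4, 4)) == stack2reg(12).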
133 OptoReg::Name Compile::compute_old_SP() {
134 int fixed = fixed_slots();
135 int preserve = in_preserve_stack_slots();
136 return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
137 }
138
139
140
141 #ifdef ASSERT
142 void Matcher::verify_new_nodes_only(Node* xroot) {
143 // Make sure that the new graph only references new nodes
144 ResourceMark rm;
145 Unique_Node_List worklist;
146 VectorSet visited;
147 worklist.push(xroot);
148 while (worklist.size() > 0) {
149 Node* n = worklist.pop();
150 if (visited.test_set(n->_idx)) {
151 continue;
152 }
153 assert(C->node_arena()->contains(n), "dead node");
154 assert(!n->is_Initialize() || n->as_Initialize()->number_of_projs(TypeFunc::Memory) == 1,
155 "after matching, Initialize should have a single memory projection");
156 for (uint j = 0; j < n->req(); j++) {
157 Node* in = n->in(j);
158 if (in != nullptr) {
159 worklist.push(in);
160 }
161 }
162 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
163 worklist.push(n->fast_out(j));
164 }
165 }
166 }
167 #endif
168
// Array of RegMask, one per returned value (inline type instances can
// be returned as multiple return values, one per field)
171 RegMask* Matcher::return_values_mask(const TypeFunc* tf) {
172 const TypeTuple* range = tf->range_cc();
173 uint cnt = range->cnt() - TypeFunc::Parms;
174 if (cnt == 0) {
175 return nullptr;
176 }
177 RegMask* mask = NEW_RESOURCE_ARRAY(RegMask, cnt);
178 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt);
179 VMRegPair* vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt);
180 for (uint i = 0; i < cnt; i++) {
181 sig_bt[i] = range->field_at(i+TypeFunc::Parms)->basic_type();
182 new (mask + i) RegMask();
183 }
184
185 int regs = SharedRuntime::java_return_convention(sig_bt, vm_parm_regs, cnt);
186 if (regs <= 0) {
187 // We ran out of registers to store the null marker for a nullable inline type return.
188 // Since it is only set in the 'call_epilog', we can simply put it on the stack.
189 assert(tf->returns_inline_type_as_fields(), "should have been tested during graph construction");
190 // TODO 8284443 Can we teach the register allocator to reserve a stack slot instead?
191 // mask[--cnt] = STACK_ONLY_mask does not work (test with -XX:+StressGCM)
192 int slot = C->fixed_slots() - 2;
193 if (C->needs_stack_repair()) {
194 slot -= 2; // Account for stack increment value
195 }
196 mask[--cnt].clear();
197 mask[cnt].insert(OptoReg::stack2reg(slot));
198 }
199 for (uint i = 0; i < cnt; i++) {
200 mask[i].clear();
201
202 OptoReg::Name reg1 = OptoReg::as_OptoReg(vm_parm_regs[i].first());
203 if (OptoReg::is_valid(reg1)) {
204 mask[i].insert(reg1);
205 }
206 OptoReg::Name reg2 = OptoReg::as_OptoReg(vm_parm_regs[i].second());
207 if (OptoReg::is_valid(reg2)) {
208 mask[i].insert(reg2);
209 }
210 }
211
212 return mask;
213 }
214
215 //---------------------------match---------------------------------------------
216 void Matcher::match( ) {
217 if( MaxLabelRootDepth < 100 ) { // Too small?
218 assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
219 MaxLabelRootDepth = 100;
220 }
221 // One-time initialization of some register masks.
222 init_spill_mask( C->root()->in(1) );
223 if (C->failing()) {
224 return;
225 }
226 assert(_return_addr_mask.is_empty(),
227 "return address mask must be empty initially");
228 _return_addr_mask.insert(return_addr());
229 #ifdef _LP64
230 // Pointers take 2 slots in 64-bit land
231 _return_addr_mask.insert(OptoReg::add(return_addr(), 1));
232 #endif
233
234 // Map Java-signature return types into return register-value
235 // machine registers.
236 _return_values_mask = return_values_mask(C->tf());
237
238 // ---------------
239 // Frame Layout
240
241 // Need the method signature to determine the incoming argument types,
242 // because the types determine which registers the incoming arguments are
243 // in, and this affects the matched code.
244 const TypeTuple *domain = C->tf()->domain_cc();
245 uint argcnt = domain->cnt() - TypeFunc::Parms;
246 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
247 VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
248 _parm_regs = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
249 _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
250 uint i;
251 for( i = 0; i<argcnt; i++ ) {
252 sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
253 new (_calling_convention_mask + i) RegMask(C->comp_arena());
254 }
255
256 // Pass array of ideal registers and length to USER code (from the AD file)
257 // that will convert this to an array of register numbers.
258 const StartNode *start = C->start();
259 start->calling_convention( sig_bt, vm_parm_regs, argcnt );
260 #ifdef ASSERT
261 // Sanity check users' calling convention. Real handy while trying to
262 // get the initial port correct.
263 { for (uint i = 0; i<argcnt; i++) {
264 if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
266 _parm_regs[i].set_bad();
267 continue;
268 }
269 VMReg parm_reg = vm_parm_regs[i].first();
270 assert(parm_reg->is_valid(), "invalid arg?");
271 if (parm_reg->is_reg()) {
272 OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
273 assert(can_be_java_arg(opto_parm_reg) ||
274 C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
275 opto_parm_reg == inline_cache_reg(),
276 "parameters in register must be preserved by runtime stubs");
277 }
278 for (uint j = 0; j < i; j++) {
279 assert(parm_reg != vm_parm_regs[j].first(),
280 "calling conv. must produce distinct regs");
281 }
282 }
283 }
284 #endif
285
286 // Do some initial frame layout.
287
288 // Compute the old incoming SP (may be called FP) as
289 // OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
290 _old_SP = C->compute_old_SP();
291 assert( is_even(_old_SP), "must be even" );
292
293 // Compute highest incoming stack argument as
294 // _old_SP + out_preserve_stack_slots + incoming argument size.
295 _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
296 assert( is_even(_in_arg_limit), "out_preserve must be even" );
297 for( i = 0; i < argcnt; i++ ) {
298 // Permit args to have no register
299 _calling_convention_mask[i].clear();
300 if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
301 _parm_regs[i].set_bad();
302 continue;
303 }
304 // calling_convention returns stack arguments as a count of
305 // slots beyond OptoReg::stack0()/VMRegImpl::stack0. We need to convert this to
    // the allocator's point of view, taking into account all the
307 // preserve area, locks & pad2.
308
309 OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
310 if( OptoReg::is_valid(reg1))
311 _calling_convention_mask[i].insert(reg1);
312
313 OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
314 if( OptoReg::is_valid(reg2))
315 _calling_convention_mask[i].insert(reg2);
316
317 // Saved biased stack-slot register number
318 _parm_regs[i].set_pair(reg2, reg1);
319 }
320
  // Allocated register sets are aligned to their size. Offsets to the stack
  // pointer have to be aligned to the size of the access. For this, _new_SP is
  // aligned to the size of the largest register set, with the stack alignment
  // as an upper limit and a minimum of SlotsPerLong (2).
  int vector_alignment = MIN2(C->max_vector_size(), stack_alignment_in_bytes()) / VMRegImpl::stack_slot_size;
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, MAX2((int)RegMask::SlotsPerLong, vector_alignment)));
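  // For example, assuming 32-byte max vectors, a 16-byte stack alignment and
  // 4-byte stack slots: min(32, 16) / 4 == 4, so _new_SP is aligned to
  // max(SlotsPerLong, 4) == 4 slots.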
327
328 // Compute highest outgoing stack argument as
329 // _new_SP + out_preserve_stack_slots + max(outgoing argument size).
330 _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
331 assert( is_even(_out_arg_limit), "out_preserve must be even" );
332
333 // ---------------
334 // Collect roots of matcher trees. Every node for which
335 // _shared[_idx] is cleared is guaranteed to not be shared, and thus
336 // can be a valid interior of some tree.
337 find_shared( C->root() );
338 find_shared( C->top() );
339
340 C->print_method(PHASE_BEFORE_MATCHING, 1);
341
  // Create a new ideal ConP #null node even if one already exists in old space,
343 // to avoid false sharing if the corresponding mach node is not used.
344 // The corresponding mach node is only used in rare cases for derived
345 // pointers.
346 Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);
347
348 // Swap out to old-space; emptying new-space
349 Arena* old = C->swap_old_and_new();
350
351 // Save debug and profile information for nodes in old space:
352 _old_node_note_array = C->node_note_array();
353 if (_old_node_note_array != nullptr) {
354 C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
355 (C->comp_arena(), _old_node_note_array->length(),
356 0, nullptr));
357 }
358
359 // Pre-size the new_node table to avoid the need for range checks.
360 grow_new_node_array(C->unique());
361
362 // Reset node counter so MachNodes start with _idx at 0
363 int live_nodes = C->live_nodes();
364 C->set_unique(0);
365 C->reset_dead_node_list();
366
367 // Recursively match trees from old space into new space.
368 // Correct leaves of new-space Nodes; they point to old-space.
369 _visited.clear();
370 Node* const n = xform(C->top(), live_nodes);
371 if (C->failing()) return;
372 C->set_cached_top_node(n);
373 if (!C->failing()) {
374 Node* xroot = xform( C->root(), 1 );
375 if (C->failing()) return;
376 if (xroot == nullptr) {
377 Matcher::soft_match_failure(); // recursive matching process failed
378 assert(false, "instruction match failed");
379 C->record_method_not_compilable("instruction match failed");
380 } else {
381 // During matching shared constants were attached to C->root()
382 // because xroot wasn't available yet, so transfer the uses to
383 // the xroot.
384 for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
385 Node* n = C->root()->fast_out(j);
386 if (C->node_arena()->contains(n)) {
387 assert(n->in(0) == C->root(), "should be control user");
388 n->set_req(0, xroot);
389 --j;
390 --jmax;
391 }
392 }
393
394 // Generate new mach node for ConP #null
395 assert(new_ideal_null != nullptr, "sanity");
396 _mach_null = match_tree(new_ideal_null);
397 // Don't set control, it will confuse GCM since there are no uses.
398 // The control will be set when this node is used first time
399 // in find_base_for_derived().
400 assert(_mach_null != nullptr || C->failure_is_artificial(), ""); // bailouts are handled below.
401
402 C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr);
403
404 #ifdef ASSERT
405 verify_new_nodes_only(xroot);
406 #endif
407 }
408 }
409 if (C->top() == nullptr || C->root() == nullptr) {
410 // New graph lost. This is due to a compilation failure we encountered earlier.
411 stringStream ss;
412 if (C->failure_reason() != nullptr) {
413 ss.print("graph lost: %s", C->failure_reason());
414 } else {
415 assert(C->failure_reason() != nullptr, "graph lost: reason unknown");
416 ss.print("graph lost: reason unknown");
417 }
418 C->record_method_not_compilable(ss.as_string() DEBUG_ONLY(COMMA true));
419 }
420 if (C->failing()) {
421 // delete old;
422 old->destruct_contents();
423 return;
424 }
425 assert( C->top(), "" );
426 assert( C->root(), "" );
427 validate_null_checks();
428
429 // Now smoke old-space
430 NOT_DEBUG( old->destruct_contents() );
431
432 // ------------------------
433 // Set up save-on-entry registers.
434 Fixup_Save_On_Entry( );
435
436 { // Cleanup mach IR after selection phase is over.
437 Compile::TracePhase tp(_t_postselect_cleanup);
438 do_postselect_cleanup();
439 if (C->failing()) return;
440 assert(verify_after_postselect_cleanup(), "");
441 }
442 }
443
444 //------------------------------Fixup_Save_On_Entry----------------------------
445 // The stated purpose of this routine is to take care of save-on-entry
446 // registers. However, the overall goal of the Match phase is to convert into
447 // machine-specific instructions which have RegMasks to guide allocation.
448 // So what this procedure really does is put a valid RegMask on each input
449 // to the machine-specific variations of all Return, TailCall and Halt
// instructions. It also adds edges to define the save-on-entry values (and of
451 // course gives them a mask).
452
453 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
454 RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
455 for (unsigned int i = 0; i < size; ++i) {
456 new (rms + i) RegMask(Compile::current()->comp_arena());
457 }
458 // Do all the pre-defined register masks
459 rms[TypeFunc::Control ].assignFrom(RegMask::EMPTY);
460 rms[TypeFunc::I_O ].assignFrom(RegMask::EMPTY);
461 rms[TypeFunc::Memory ].assignFrom(RegMask::EMPTY);
462 rms[TypeFunc::ReturnAdr].assignFrom(ret_adr);
463 rms[TypeFunc::FramePtr ].assignFrom(fp);
464 return rms;
465 }
466
467 int Matcher::scalable_predicate_reg_slots() {
468 assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
469 "scalable predicate vector should be supported");
470 int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth the size of the
  // scalable vector register, i.e. one mask bit per vector byte.
473 int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute the number of slots required when a scalable predicate
  // register is spilled. E.g. if the scalable vector register is 640 bits,
  // the predicate register is 80 bits, which needs 2.5 stack slots.
  // We round the slot count up to a power of 2, as required
  // by find_first_set().
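  // For the 640-bit example: 80 & 31 != 0, so slots = (80 >> 5) + 1 == 3,
  // which round_up_power_of_2() rounds to 4.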
479 int slots = predicate_reg_bit_size & (BitsPerInt - 1)
480 ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
481 : predicate_reg_bit_size >> LogBitsPerInt;
482 return round_up_power_of_2(slots);
483 }
484
485 #define NOF_STACK_MASKS (2*13)
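// Two masks (a spill mask and a debug mask) for each of the 13 ideal register
// classes Op_RegN .. Op_RegVectMask handled by init_first_stack_mask() below.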
486
487 // Create the initial stack mask used by values spilling to the stack.
488 // Disallow any debug info in outgoing argument areas by setting the
489 // initial mask accordingly.
490 void Matcher::init_first_stack_mask() {
491
492 // Allocate storage for spill masks as masks for the appropriate load type.
493 RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);
494
495 // Initialize empty placeholder masks into the newly allocated arena
496 for (int i = 0; i < NOF_STACK_MASKS; i++) {
497 new (rms + i) RegMask(C->comp_arena());
498 }
499
500 int index = 0;
501 for (int i = Op_RegN; i <= Op_RegVectMask; ++i) {
502 idealreg2spillmask[i] = &rms[index++];
503 idealreg2debugmask[i] = &rms[index++];
504 }
505 assert(index == NOF_STACK_MASKS, "wrong size");
506
507 // At first, start with the empty mask
508 C->FIRST_STACK_mask().clear();
509
510 // Add in the incoming argument area
511 OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
512 for (OptoReg::Name i = init_in; i < _in_arg_limit; i = OptoReg::add(i, 1)) {
513 C->FIRST_STACK_mask().insert(i);
514 }
515
516 // Add in all bits past the outgoing argument area
517 C->FIRST_STACK_mask().set_all_from(_out_arg_limit);
518
519 // Make spill masks. Registers for their class, plus FIRST_STACK_mask.
520 RegMask aligned_stack_mask(C->FIRST_STACK_mask(), C->comp_arena());
521 // Keep spill masks aligned.
522 aligned_stack_mask.clear_to_pairs();
523 assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
524 RegMask scalable_stack_mask(aligned_stack_mask, C->comp_arena());
525
526 idealreg2spillmask[Op_RegP]->assignFrom(*idealreg2regmask[Op_RegP]);
527 #ifdef _LP64
528 idealreg2spillmask[Op_RegN]->assignFrom(*idealreg2regmask[Op_RegN]);
529 idealreg2spillmask[Op_RegN]->or_with(C->FIRST_STACK_mask());
530 idealreg2spillmask[Op_RegP]->or_with(aligned_stack_mask);
531 #else
532 idealreg2spillmask[Op_RegP]->or_with(C->FIRST_STACK_mask());
533 #endif
534 idealreg2spillmask[Op_RegI]->assignFrom(*idealreg2regmask[Op_RegI]);
535 idealreg2spillmask[Op_RegI]->or_with(C->FIRST_STACK_mask());
536 idealreg2spillmask[Op_RegL]->assignFrom(*idealreg2regmask[Op_RegL]);
537 idealreg2spillmask[Op_RegL]->or_with(aligned_stack_mask);
538 idealreg2spillmask[Op_RegF]->assignFrom(*idealreg2regmask[Op_RegF]);
539 idealreg2spillmask[Op_RegF]->or_with(C->FIRST_STACK_mask());
540 idealreg2spillmask[Op_RegD]->assignFrom(*idealreg2regmask[Op_RegD]);
541 idealreg2spillmask[Op_RegD]->or_with(aligned_stack_mask);
542
543 if (Matcher::has_predicated_vectors()) {
544 idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]);
545 idealreg2spillmask[Op_RegVectMask]->or_with(aligned_stack_mask);
546 } else {
547 idealreg2spillmask[Op_RegVectMask]->assignFrom(RegMask::EMPTY);
548 }
549
550 if (Matcher::vector_size_supported(T_BYTE,4)) {
551 idealreg2spillmask[Op_VecS]->assignFrom(*idealreg2regmask[Op_VecS]);
552 idealreg2spillmask[Op_VecS]->or_with(C->FIRST_STACK_mask());
553 } else {
554 idealreg2spillmask[Op_VecS]->assignFrom(RegMask::EMPTY);
555 }
556
557 if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need 2-slot alignment and 8 bytes (2 slots) for spills.
559 // RA guarantees such alignment since it is needed for Double and Long values.
560 idealreg2spillmask[Op_VecD]->assignFrom(*idealreg2regmask[Op_VecD]);
561 idealreg2spillmask[Op_VecD]->or_with(aligned_stack_mask);
562 } else {
563 idealreg2spillmask[Op_VecD]->assignFrom(RegMask::EMPTY);
564 }
565
566 if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need 4-slot alignment and 16 bytes (4 slots) for spills.
568 //
    // The RA can use input argument stack slots for spills, but until RA runs
    // we don't know the frame size or the offsets of the input arg stack slots.
    //
    // Exclude the last input arg stack slots to avoid spilling vectors there,
    // otherwise vector spills could stomp over stack slots in the caller's frame.
574 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
575 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
576 aligned_stack_mask.remove(in);
577 in = OptoReg::add(in, -1);
578 }
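    // (k runs from 1 to SlotsPerVecX - 1, so at most the last 3 in-arg slots are
    // removed; the VecY, VecZ and scalable cases below follow the same pattern.)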
579 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
580 assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
581 idealreg2spillmask[Op_VecX]->assignFrom(*idealreg2regmask[Op_VecX]);
582 idealreg2spillmask[Op_VecX]->or_with(aligned_stack_mask);
583 } else {
584 idealreg2spillmask[Op_VecX]->assignFrom(RegMask::EMPTY);
585 }
586
587 if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need 8-slot alignment and 32 bytes (8 slots) for spills.
589 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
590 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
591 aligned_stack_mask.remove(in);
592 in = OptoReg::add(in, -1);
593 }
594 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
595 assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
596 idealreg2spillmask[Op_VecY]->assignFrom(*idealreg2regmask[Op_VecY]);
597 idealreg2spillmask[Op_VecY]->or_with(aligned_stack_mask);
598 } else {
599 idealreg2spillmask[Op_VecY]->assignFrom(RegMask::EMPTY);
600 }
601
602 if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need 16-slot alignment and 64 bytes (16 slots) for spills.
604 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
605 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
606 aligned_stack_mask.remove(in);
607 in = OptoReg::add(in, -1);
608 }
609 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
610 assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
611 idealreg2spillmask[Op_VecZ]->assignFrom(*idealreg2regmask[Op_VecZ]);
612 idealreg2spillmask[Op_VecZ]->or_with(aligned_stack_mask);
613 } else {
614 idealreg2spillmask[Op_VecZ]->assignFrom(RegMask::EMPTY);
615 }
616
617 if (Matcher::supports_scalable_vector()) {
618 int k = 1;
619 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
620 if (Matcher::has_predicated_vectors()) {
      // Exclude the last input arg stack slots to avoid spilling the predicate register there,
      // otherwise RegVectMask spills could stomp over stack slots in the caller's frame.
623 for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
624 scalable_stack_mask.remove(in);
625 in = OptoReg::add(in, -1);
626 }
627
628 // For RegVectMask
629 scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
630 assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
631 idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]);
632 idealreg2spillmask[Op_RegVectMask]->or_with(scalable_stack_mask);
633 }
634
    // Exclude the last input arg stack slots to avoid spilling vector registers there,
    // otherwise vector spills could stomp over stack slots in the caller's frame.
637 for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
638 scalable_stack_mask.remove(in);
639 in = OptoReg::add(in, -1);
640 }
641
642 // For VecA
643 scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
644 assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
645 idealreg2spillmask[Op_VecA]->assignFrom(*idealreg2regmask[Op_VecA]);
646 idealreg2spillmask[Op_VecA]->or_with(scalable_stack_mask);
647 } else {
648 idealreg2spillmask[Op_VecA]->assignFrom(RegMask::EMPTY);
649 }
650
651 if (UseFPUForSpilling) {
652 // This mask logic assumes that the spill operations are
653 // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that will
    // kill 2 registers when used with F0-F31.
656 idealreg2spillmask[Op_RegI]->or_with(*idealreg2regmask[Op_RegF]);
657 idealreg2spillmask[Op_RegF]->or_with(*idealreg2regmask[Op_RegI]);
658 #ifdef _LP64
659 idealreg2spillmask[Op_RegN]->or_with(*idealreg2regmask[Op_RegF]);
660 idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
661 idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
662 idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegD]);
663 #else
664 idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegF]);
665 #ifdef ARM
666 // ARM has support for moving 64bit values between a pair of
667 // integer registers and a double register
668 idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
669 idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
670 #endif
671 #endif
672 }
673
674 // Make up debug masks. Any spill slot plus callee-save (SOE) registers.
675 // Caller-save (SOC, AS) registers are assumed to be trashable by the various
676 // inline-cache fixup routines.
677 idealreg2debugmask[Op_RegN]->assignFrom(*idealreg2spillmask[Op_RegN]);
678 idealreg2debugmask[Op_RegI]->assignFrom(*idealreg2spillmask[Op_RegI]);
679 idealreg2debugmask[Op_RegL]->assignFrom(*idealreg2spillmask[Op_RegL]);
680 idealreg2debugmask[Op_RegF]->assignFrom(*idealreg2spillmask[Op_RegF]);
681 idealreg2debugmask[Op_RegD]->assignFrom(*idealreg2spillmask[Op_RegD]);
682 idealreg2debugmask[Op_RegP]->assignFrom(*idealreg2spillmask[Op_RegP]);
683 idealreg2debugmask[Op_RegVectMask]->assignFrom(*idealreg2spillmask[Op_RegVectMask]);
684
685 idealreg2debugmask[Op_VecA]->assignFrom(*idealreg2spillmask[Op_VecA]);
686 idealreg2debugmask[Op_VecS]->assignFrom(*idealreg2spillmask[Op_VecS]);
687 idealreg2debugmask[Op_VecD]->assignFrom(*idealreg2spillmask[Op_VecD]);
688 idealreg2debugmask[Op_VecX]->assignFrom(*idealreg2spillmask[Op_VecX]);
689 idealreg2debugmask[Op_VecY]->assignFrom(*idealreg2spillmask[Op_VecY]);
690 idealreg2debugmask[Op_VecZ]->assignFrom(*idealreg2spillmask[Op_VecZ]);
691
692 // Prevent stub compilations from attempting to reference
693 // callee-saved (SOE) registers from debug info
694 bool exclude_soe = !Compile::current()->is_method_compilation();
695 RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
696
697 idealreg2debugmask[Op_RegN]->subtract(*caller_save_mask);
698 idealreg2debugmask[Op_RegI]->subtract(*caller_save_mask);
699 idealreg2debugmask[Op_RegL]->subtract(*caller_save_mask);
700 idealreg2debugmask[Op_RegF]->subtract(*caller_save_mask);
701 idealreg2debugmask[Op_RegD]->subtract(*caller_save_mask);
702 idealreg2debugmask[Op_RegP]->subtract(*caller_save_mask);
703 idealreg2debugmask[Op_RegVectMask]->subtract(*caller_save_mask);
704
705 idealreg2debugmask[Op_VecA]->subtract(*caller_save_mask);
706 idealreg2debugmask[Op_VecS]->subtract(*caller_save_mask);
707 idealreg2debugmask[Op_VecD]->subtract(*caller_save_mask);
708 idealreg2debugmask[Op_VecX]->subtract(*caller_save_mask);
709 idealreg2debugmask[Op_VecY]->subtract(*caller_save_mask);
710 idealreg2debugmask[Op_VecZ]->subtract(*caller_save_mask);
711 }
712
713 //---------------------------is_save_on_entry----------------------------------
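// Register save policies come from the AD file: 'C' caller-save (SOC),
// 'E' save-on-entry (SOE), 'A' always-save (AS) and 'N' no-save. 'A' registers
// are treated as save-on-entry here and also as caller-save when the debug
// masks are built (see init_spill_mask() and init_first_stack_mask()).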
714 bool Matcher::is_save_on_entry(int reg) {
715 return
716 _register_save_policy[reg] == 'E' ||
717 _register_save_policy[reg] == 'A'; // Save-on-entry register?
718 }
719
720 //---------------------------Fixup_Save_On_Entry-------------------------------
721 void Matcher::Fixup_Save_On_Entry( ) {
722 init_first_stack_mask();
723
724 Node *root = C->root(); // Short name for root
725 // Count number of save-on-entry registers.
726 uint soe_cnt = number_of_saved_registers();
727 uint i;
728
729 // Find the procedure Start Node
730 StartNode *start = C->start();
731 assert( start, "Expect a start node" );
732
733 // Input RegMask array shared by all Returns.
734 // The type for doubles and longs has a count of 2, but
735 // there is only 1 returned value
736 uint ret_edge_cnt = C->tf()->range_cc()->cnt();
737 RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
738 for (i = TypeFunc::Parms; i < ret_edge_cnt; i++) {
739 ret_rms[i].assignFrom(_return_values_mask[i-TypeFunc::Parms]);
740 }
741
742 // Input RegMask array shared by all ForwardExceptions
743 uint forw_exc_edge_cnt = TypeFunc::Parms;
744 RegMask* forw_exc_rms = init_input_masks( forw_exc_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
745
746 // Input RegMask array shared by all Rethrows.
747 uint reth_edge_cnt = TypeFunc::Parms+1;
748 RegMask *reth_rms = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes only the exception oop, passed in the argument 0 (receiver) slot.
750 OptoReg::Name reg = find_receiver();
751 if (reg >= 0) {
752 reth_rms[TypeFunc::Parms].assignFrom(mreg2regmask[reg]);
753 #ifdef _LP64
754 // Need two slots for ptrs in 64-bit land
755 reth_rms[TypeFunc::Parms].insert(OptoReg::add(OptoReg::Name(reg), 1));
756 #endif
757 }
758
759 // Input RegMask array shared by all TailCalls
760 uint tail_call_edge_cnt = TypeFunc::Parms+2;
761 RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
762
763 // Input RegMask array shared by all TailJumps
764 uint tail_jump_edge_cnt = TypeFunc::Parms+2;
765 RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
766
767 // TailCalls have 2 returned values (target & moop), whose masks come
768 // from the usual MachNode/MachOper mechanism. Find a sample
769 // TailCall to extract these masks and put the correct masks into
770 // the tail_call_rms array.
771 for( i=1; i < root->req(); i++ ) {
772 MachReturnNode *m = root->in(i)->as_MachReturn();
773 if( m->ideal_Opcode() == Op_TailCall ) {
774 tail_call_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0));
775 tail_call_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1));
776 break;
777 }
778 }
779
780 // TailJumps have 2 returned values (target & ex_oop), whose masks come
781 // from the usual MachNode/MachOper mechanism. Find a sample
782 // TailJump to extract these masks and put the correct masks into
783 // the tail_jump_rms array.
784 for( i=1; i < root->req(); i++ ) {
785 MachReturnNode *m = root->in(i)->as_MachReturn();
786 if( m->ideal_Opcode() == Op_TailJump ) {
787 tail_jump_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0));
788 tail_jump_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1));
789 break;
790 }
791 }
792
793 // Input RegMask array shared by all Halts
794 uint halt_edge_cnt = TypeFunc::Parms;
795 RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
796
797 // Capture the return input masks into each exit flavor
798 for( i=1; i < root->req(); i++ ) {
799 MachReturnNode *exit = root->in(i)->as_MachReturn();
800 switch( exit->ideal_Opcode() ) {
801 case Op_Return : exit->_in_rms = ret_rms; break;
802 case Op_Rethrow : exit->_in_rms = reth_rms; break;
803 case Op_TailCall : exit->_in_rms = tail_call_rms; break;
804 case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
805 case Op_ForwardException: exit->_in_rms = forw_exc_rms; break;
806 case Op_Halt : exit->_in_rms = halt_rms; break;
807 default : ShouldNotReachHere();
808 }
809 }
810
811 // Next unused projection number from Start.
812 int proj_cnt = C->tf()->domain_cc()->cnt();
813
814 // Do all the save-on-entry registers. Make projections from Start for
815 // them, and give them a use at the exit points. To the allocator, they
816 // look like incoming register arguments.
817 for( i = 0; i < _last_Mach_Reg; i++ ) {
818 if( is_save_on_entry(i) ) {
819
820 // Add the save-on-entry to the mask array
821 ret_rms [ ret_edge_cnt].assignFrom(mreg2regmask[i]);
822 reth_rms [ reth_edge_cnt].assignFrom(mreg2regmask[i]);
823 tail_call_rms[tail_call_edge_cnt].assignFrom(mreg2regmask[i]);
824 tail_jump_rms[tail_jump_edge_cnt].assignFrom(mreg2regmask[i]);
825 forw_exc_rms [ forw_exc_edge_cnt].assignFrom(mreg2regmask[i]);
826 // Halts need the SOE registers, but only in the stack as debug info.
827 // A just-prior uncommon-trap or deoptimization will use the SOE regs.
828 halt_rms [ halt_edge_cnt].assignFrom(*idealreg2spillmask[_register_save_type[i]]);
829
830 Node *mproj;
831
832 // Is this a RegF low half of a RegD? Double up 2 adjacent RegF's
833 // into a single RegD.
834 if( (i&1) == 0 &&
835 _register_save_type[i ] == Op_RegF &&
836 _register_save_type[i+1] == Op_RegF &&
837 is_save_on_entry(i+1) ) {
838 // Add other bit for double
839 ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
840 reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
841 tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
842 tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
843 forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
844 halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
845 mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
846 proj_cnt += 2; // Skip 2 for doubles
847 }
848 else if( (i&1) == 1 && // Else check for high half of double
849 _register_save_type[i-1] == Op_RegF &&
850 _register_save_type[i ] == Op_RegF &&
851 is_save_on_entry(i-1) ) {
852 ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY);
853 reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY);
854 tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY);
855 tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY);
856 forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY);
857 halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY);
858 mproj = C->top();
859 }
860 // Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
861 // into a single RegL.
862 else if( (i&1) == 0 &&
863 _register_save_type[i ] == Op_RegI &&
864 _register_save_type[i+1] == Op_RegI &&
865 is_save_on_entry(i+1) ) {
866 // Add other bit for long
867 ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
868 reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
869 tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
870 tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
871 forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
872 halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
873 mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
874 proj_cnt += 2; // Skip 2 for longs
875 }
876 else if( (i&1) == 1 && // Else check for high half of long
877 _register_save_type[i-1] == Op_RegI &&
878 _register_save_type[i ] == Op_RegI &&
879 is_save_on_entry(i-1) ) {
880 ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY);
881 reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY);
882 tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY);
883 tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY);
884 forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY);
885 halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY);
886 mproj = C->top();
887 } else {
888 // Make a projection for it off the Start
889 mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
890 }
891
892 ret_edge_cnt ++;
893 reth_edge_cnt ++;
894 tail_call_edge_cnt ++;
895 tail_jump_edge_cnt ++;
896 forw_exc_edge_cnt++;
897 halt_edge_cnt ++;
898
899 // Add a use of the SOE register to all exit paths
900 for (uint j=1; j < root->req(); j++) {
901 root->in(j)->add_req(mproj);
902 }
903 } // End of if a save-on-entry register
904 } // End of for all machine registers
905 }
906
907 //------------------------------init_spill_mask--------------------------------
908 void Matcher::init_spill_mask( Node *ret ) {
909 if( idealreg2regmask[Op_RegI] ) return; // One time only init
910
911 OptoReg::c_frame_pointer = c_frame_pointer();
912 c_frame_ptr_mask.assignFrom(RegMask(c_frame_pointer()));
913 #ifdef _LP64
914 // pointers are twice as big
915 c_frame_ptr_mask.insert(OptoReg::add(c_frame_pointer(), 1));
916 #endif
917
918 // Start at OptoReg::stack0()
919 STACK_ONLY_mask.clear();
920 // STACK_ONLY_mask is all stack bits
921 STACK_ONLY_mask.set_all_from(OptoReg::stack2reg(0));
922
923 for (OptoReg::Name i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg);
924 i = OptoReg::add(i, 1)) {
925 // Copy the register names over into the shared world.
926 // SharedInfo::regName[i] = regName[i];
927 // Handy RegMasks per machine register
928 mreg2regmask[i].insert(i);
929
930 // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
931 if (_register_save_policy[i] == 'C' ||
932 _register_save_policy[i] == 'A') {
933 caller_save_regmask.insert(i);
934 }
935 // Exclude save-on-entry registers from debug masks for stub compilations.
936 if (_register_save_policy[i] == 'C' ||
937 _register_save_policy[i] == 'A' ||
938 _register_save_policy[i] == 'E') {
939 caller_save_regmask_exclude_soe.insert(i);
940 }
941 }
942
943 // Grab the Frame Pointer
944 Node *fp = ret->in(TypeFunc::FramePtr);
945 // Share frame pointer while making spill ops
946 set_shared(fp);
947
948 // Get the ADLC notion of the right regmask, for each basic type.
949 #ifdef _LP64
950 idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
951 #endif
952 idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
953 idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
954 idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
955 idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
956 idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
957 idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
958 idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
959 idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
960 idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
961 idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
962 idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
963 idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
964 }
965
966 #ifdef ASSERT
967 static void match_alias_type(Compile* C, Node* n, Node* m) {
968 if (!VerifyAliases) return; // do not go looking for trouble by default
969 const TypePtr* nat = n->adr_type();
970 const TypePtr* mat = m->adr_type();
971 int nidx = C->get_alias_index(nat);
972 int midx = C->get_alias_index(mat);
973 // Detune the assert for cases like (AndI 0xFF (LoadB p)).
974 if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
975 for (uint i = 1; i < n->req(); i++) {
976 Node* n1 = n->in(i);
977 const TypePtr* n1at = n1->adr_type();
978 if (n1at != nullptr) {
979 nat = n1at;
980 nidx = C->get_alias_index(n1at);
981 }
982 }
983 }
984 // %%% Kludgery. Instead, fix ideal adr_type methods for all these cases:
985 if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
986 switch (n->Opcode()) {
987 case Op_PrefetchAllocation:
988 nidx = Compile::AliasIdxRaw;
989 nat = TypeRawPtr::BOTTOM;
990 break;
991 }
992 }
993 if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
994 switch (n->Opcode()) {
995 case Op_ClearArray:
996 midx = Compile::AliasIdxRaw;
997 mat = TypeRawPtr::BOTTOM;
998 break;
999 }
1000 }
1001 if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
1002 switch (n->Opcode()) {
1003 case Op_Return:
1004 case Op_Rethrow:
1005 case Op_Halt:
1006 case Op_TailCall:
1007 case Op_TailJump:
1008 case Op_ForwardException:
1009 nidx = Compile::AliasIdxBot;
1010 nat = TypePtr::BOTTOM;
1011 break;
1012 }
1013 }
1014 if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
1015 switch (n->Opcode()) {
1016 case Op_StrComp:
1017 case Op_StrEquals:
1018 case Op_StrIndexOf:
1019 case Op_StrIndexOfChar:
1020 case Op_AryEq:
1021 case Op_VectorizedHashCode:
1022 case Op_CountPositives:
1023 case Op_MemBarVolatile:
1024 case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
1025 case Op_StrInflatedCopy:
1026 case Op_StrCompressedCopy:
1027 case Op_OnSpinWait:
1028 case Op_EncodeISOArray:
1029 nidx = Compile::AliasIdxTop;
1030 nat = nullptr;
1031 break;
1032 }
1033 }
1034 if (nidx != midx) {
1035 if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
1036 tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
1037 n->dump();
1038 m->dump();
1039 }
1040 assert(C->subsume_loads() && C->must_alias(nat, midx),
1041 "must not lose alias info when matching");
1042 }
1043 }
1044 #endif
1045
1046 //------------------------------xform------------------------------------------
// Given a Node in old-space, Match it (Label/Reduce) to produce a machine
// Node in new-space. Given a new-space Node, recursively walk its children.
1049 Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
1050 Node *Matcher::xform( Node *n, int max_stack ) {
1051 // Use one stack to keep both: child's node/state and parent's node/index
1052 MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
1053 mstack.push(n, Visit, nullptr, -1); // set null as parent to indicate root
1054 while (mstack.is_nonempty()) {
1055 C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1056 if (C->failing()) return nullptr;
1057 n = mstack.node(); // Leave node on stack
1058 Node_State nstate = mstack.state();
1059 if (nstate == Visit) {
1060 mstack.set_state(Post_Visit);
1061 Node *oldn = n;
1062 // Old-space or new-space check
1063 if (!C->node_arena()->contains(n)) {
1064 // Old space!
1065 Node* m = nullptr;
1066 if (has_new_node(n)) { // Not yet Label/Reduced
1067 m = new_node(n);
1068 } else {
1069 if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls are matched specially: they match alone, without their children.
            // Their children, the incoming arguments, are matched normally.
1072 m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1073 if (C->failing()) return nullptr;
1074 if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; }
1075 if (n->is_MemBar()) {
1076 m->as_MachMemBar()->set_adr_type(n->adr_type());
1077 }
1078 } else { // Nothing the matcher cares about
1079 if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) { // Projections?
1080 if (n->in(0)->is_Initialize() && n->as_Proj()->_con == TypeFunc::Memory) {
1081 // Initialize may have multiple NarrowMem projections. They would all match to identical raw mem MachProjs.
1082 // We don't need multiple MachProjs. Create one if none already exist, otherwise use existing one.
1083 m = n->in(0)->as_Initialize()->mem_mach_proj();
1084 if (m == nullptr && has_new_node(n->in(0))) {
1085 InitializeNode* new_init = new_node(n->in(0))->as_Initialize();
1086 m = new_init->mem_mach_proj();
1087 }
1088 assert(m == nullptr || m->is_MachProj(), "no mem projection yet or a MachProj created during matching");
1089 }
1090 if (m == nullptr) {
1091 // Convert to machine-dependent projection
1092 RegMask* mask = nullptr;
1093 if (n->in(0)->is_Call() && n->in(0)->as_Call()->tf()->returns_inline_type_as_fields()) {
1094 mask = return_values_mask(n->in(0)->as_Call()->tf());
1095 }
1096 m = n->in(0)->as_Multi()->match(n->as_Proj(), this, mask);
1097 NOT_PRODUCT(record_new2old(m, n);)
1098 }
1099 if (m->in(0) != nullptr) // m might be top
1100 collect_null_checks(m, n);
1101 } else { // Else just a regular 'ol guy
1102 m = n->clone(); // So just clone into new-space
1103 NOT_PRODUCT(record_new2old(m, n);)
1104 // Def-Use edges will be added incrementally as Uses
1105 // of this node are matched.
1106 assert(m->outcnt() == 0, "no Uses of this clone yet");
1107 }
1108 }
1109
1110 set_new_node(n, m); // Map old to new
1111 if (_old_node_note_array != nullptr) {
1112 Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
1113 n->_idx);
1114 C->set_node_notes_at(m->_idx, nn);
1115 }
1116 DEBUG_ONLY(match_alias_type(C, n, m));
1117 }
1118 n = m; // n is now a new-space node
1119 mstack.set_node(n);
1120 }
1121
1122 // New space!
1123 if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1124
1125 int i;
1126 // Put precedence edges on stack first (match them last).
1127 for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1128 Node *m = oldn->in(i);
1129 if (m == nullptr) break;
1130 // set -1 to call add_prec() instead of set_req() during Step1
1131 mstack.push(m, Visit, n, -1);
1132 }
1133
1134 // Handle precedence edges for interior nodes
1135 for (i = n->len()-1; (uint)i >= n->req(); i--) {
1136 Node *m = n->in(i);
1137 if (m == nullptr || C->node_arena()->contains(m)) continue;
1138 n->rm_prec(i);
1139 // set -1 to call add_prec() instead of set_req() during Step1
1140 mstack.push(m, Visit, n, -1);
1141 }
1142
1143 // For constant debug info, I'd rather have unmatched constants.
1144 int cnt = n->req();
1145 JVMState* jvms = n->jvms();
1146 int debug_cnt = jvms ? jvms->debug_start() : cnt;
1147
1148 // Now do only debug info. Clone constants rather than matching.
1149 // Constants are represented directly in the debug info without
1150 // the need for executable machine instructions.
1151 // Monitor boxes are also represented directly.
1152 for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1153 Node *m = n->in(i); // Get input
1154 int op = m->Opcode();
1155 assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1156 if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1157 op == Op_ConF || op == Op_ConD || op == Op_ConL
1158 // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
1159 ) {
1160 m = m->clone();
1161 NOT_PRODUCT(record_new2old(m, n));
1162 mstack.push(m, Post_Visit, n, i); // Don't need to visit
1163 mstack.push(m->in(0), Visit, m, 0);
1164 } else {
1165 mstack.push(m, Visit, n, i);
1166 }
1167 }
1168
      // And now walk its children, and convert its inputs to new-space.
1170 for( ; i >= 0; --i ) { // For all normal inputs do
1171 Node *m = n->in(i); // Get input
1172 if(m != nullptr)
1173 mstack.push(m, Visit, n, i);
1174 }
1175
1176 }
1177 else if (nstate == Post_Visit) {
1178 // Set xformed input
1179 Node *p = mstack.parent();
1180 if (p != nullptr) { // root doesn't have parent
1181 int i = (int)mstack.index();
1182 if (i >= 0)
1183 p->set_req(i, n); // required input
1184 else if (i == -1)
1185 p->add_prec(n); // precedence input
1186 else
1187 ShouldNotReachHere();
1188 }
1189 mstack.pop(); // remove processed node from stack
1190 }
1191 else {
1192 ShouldNotReachHere();
1193 }
1194 } // while (mstack.is_nonempty())
1195 return n; // Return new-space Node
1196 }
1197
1198 //------------------------------warp_outgoing_stk_arg------------------------
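// For example, an outgoing stack argument in slot 1, with begin_out_arg_area at
// stack slot 8, is warped to stack slot 9, and out_arg_limit_per_call is bumped
// to 10 if that is the highest slot seen so far for this call site.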
1199 OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
1200 // Convert outgoing argument location to a pre-biased stack offset
1201 if (reg->is_stack()) {
1202 OptoReg::Name warped = reg->reg2stack();
1203 // Adjust the stack slot offset to be the register number used
1204 // by the allocator.
1205 warped = OptoReg::add(begin_out_arg_area, warped);
1206 // Keep track of the largest numbered stack slot used for an arg.
1207 // Largest used slot per call-site indicates the amount of stack
1208 // that is killed by the call.
1209 if (warped >= out_arg_limit_per_call) {
1210 out_arg_limit_per_call = OptoReg::add(warped, 1);
1211 }
1212 return warped;
1213 }
1214 return OptoReg::as_OptoReg(reg);
1215 }
1216
1217
1218 //------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions. Calls are matched specially:
// they match alone, without their children. Their children, the incoming
// arguments, are matched normally.
1222 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1223 MachSafePointNode *msfpt = nullptr;
1224 MachCallNode *mcall = nullptr;
1225 uint cnt;
1226 // Split out case for SafePoint vs Call
1227 CallNode *call;
1228 const TypeTuple *domain;
1229 ciMethod* method = nullptr;
1230 if( sfpt->is_Call() ) {
1231 call = sfpt->as_Call();
1232 domain = call->tf()->domain_cc();
1233 cnt = domain->cnt();
1234
1235 // Match just the call, nothing else
1236 MachNode *m = match_tree(call);
1237 if (C->failing()) return nullptr;
1238 if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; }
1239
1240 // Copy data from the Ideal SafePoint to the machine version
1241 mcall = m->as_MachCall();
1242
1243 mcall->set_tf( call->tf());
1244 mcall->set_entry_point( call->entry_point());
1245 mcall->set_cnt( call->cnt());
1246 mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1247
1248 if( mcall->is_MachCallJava() ) {
1249 MachCallJavaNode *mcall_java = mcall->as_MachCallJava();
1250 const CallJavaNode *call_java = call->as_CallJava();
1251 assert(call_java->validate_symbolic_info(), "inconsistent info");
1252 method = call_java->method();
1253 mcall_java->_method = method;
1254 mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1255 mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1256 mcall_java->_arg_escape = call_java->arg_escape();
1257 if( mcall_java->is_MachCallStaticJava() )
1258 mcall_java->as_MachCallStaticJava()->_name =
1259 call_java->as_CallStaticJava()->_name;
1260 if( mcall_java->is_MachCallDynamicJava() )
1261 mcall_java->as_MachCallDynamicJava()->_vtable_index =
1262 call_java->as_CallDynamicJava()->_vtable_index;
1263 }
1264 else if( mcall->is_MachCallRuntime() ) {
1265 MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1266 mach_call_rt->_name = call->as_CallRuntime()->_name;
1267 mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1268 }
1269 msfpt = mcall;
1270 }
1271 // This is a non-call safepoint
1272 else {
1273 call = nullptr;
1274 domain = nullptr;
1275 MachNode *mn = match_tree(sfpt);
1276 if (C->failing()) return nullptr;
1277 msfpt = mn->as_MachSafePoint();
1278 cnt = TypeFunc::Parms;
1279 }
1280 msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1281
1282 // Advertise the correct memory effects (for anti-dependence computation).
1283 msfpt->set_adr_type(sfpt->adr_type());
1284
1285 // Allocate a private array of RegMasks. These RegMasks are not shared.
1286 msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1287 // Empty them all.
1288 for (uint i = 0; i < cnt; i++) {
1289 ::new (msfpt->_in_rms + i) RegMask(C->comp_arena());
1290 }
1291
1292 // Do all the pre-defined non-Empty register masks
1293 msfpt->_in_rms[TypeFunc::ReturnAdr].assignFrom(_return_addr_mask);
1294 msfpt->_in_rms[TypeFunc::FramePtr ].assignFrom(c_frame_ptr_mask);
1295
  // Place where the first outgoing argument can possibly be put.
1297 OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1298 assert( is_even(begin_out_arg_area), "" );
1299 // Compute max outgoing register number per call site.
1300 OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1301 // Calls to C may hammer extra stack slots above and beyond any arguments.
1302 // These are usually backing store for register arguments for varargs.
1303 if( call != nullptr && call->is_CallRuntime() )
1304 out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1305
1306
1307 // Do the normal argument list (parameters) register masks
  // A null entry point is a special case where the target of the call
  // is in a register.
1310 int adj = (call != nullptr && call->entry_point() == nullptr) ? 1 : 0;
1311 int argcnt = cnt - TypeFunc::Parms - adj;
1312 if( argcnt > 0 ) { // Skip it all if we have no args
1313 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1314 VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1315 int i;
1316 for( i = 0; i < argcnt; i++ ) {
1317 sig_bt[i] = domain->field_at(i+TypeFunc::Parms+adj)->basic_type();
1318 }
1319 // V-call to pick proper calling convention
1320 call->calling_convention( sig_bt, parm_regs, argcnt );
1321
1322 #ifdef ASSERT
1323 // Sanity check users' calling convention. Really handy during
1324 // the initial porting effort. Fairly expensive otherwise.
1325 { for (int i = 0; i<argcnt; i++) {
1326 if( !parm_regs[i].first()->is_valid() &&
1327 !parm_regs[i].second()->is_valid() ) continue;
1328 VMReg reg1 = parm_regs[i].first();
1329 VMReg reg2 = parm_regs[i].second();
1330 for (int j = 0; j < i; j++) {
1331 if( !parm_regs[j].first()->is_valid() &&
1332 !parm_regs[j].second()->is_valid() ) continue;
1333 VMReg reg3 = parm_regs[j].first();
1334 VMReg reg4 = parm_regs[j].second();
1335 if( !reg1->is_valid() ) {
1336 assert( !reg2->is_valid(), "valid halvsies" );
1337 } else if( !reg3->is_valid() ) {
1338 assert( !reg4->is_valid(), "valid halvsies" );
1339 } else {
1340 assert( reg1 != reg2, "calling conv. must produce distinct regs");
1341 assert( reg1 != reg3, "calling conv. must produce distinct regs");
1342 assert( reg1 != reg4, "calling conv. must produce distinct regs");
1343 assert( reg2 != reg3, "calling conv. must produce distinct regs");
1344 assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1345 assert( reg3 != reg4, "calling conv. must produce distinct regs");
1346 }
1347 }
1348 }
1349 }
1350 #endif
1351
1352 // Visit each argument. Compute its outgoing register mask.
    // Return results can now have 2 bits returned.
1354 // Compute max over all outgoing arguments both per call-site
1355 // and over the entire method.
1356 for( i = 0; i < argcnt; i++ ) {
1357 // Address of incoming argument mask to fill in
1358 RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms+adj];
1359 VMReg first = parm_regs[i].first();
1360 VMReg second = parm_regs[i].second();
1361 if(!first->is_valid() &&
1362 !second->is_valid()) {
1363 continue; // Avoid Halves
1364 }
1365 // Handle case where arguments are in vector registers.
1366 if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
1367 OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
1368 OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
1369 assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
1370 for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
1371 rm->insert(r);
1372 }
1373 }
1374 // Grab first register, adjust stack slots and insert in mask.
1375 OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
1376 if (OptoReg::is_valid(reg1)) {
1377 rm->insert( reg1 );
1378 }
1379 // Grab second register (if any), adjust stack slots and insert in mask.
1380 OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
1381 if (OptoReg::is_valid(reg2)) {
1382 rm->insert( reg2 );
1383 }
1384 } // End of for all arguments
1385 }
1386
1387 // Compute the max stack slot killed by any call. These will not be
1388 // available for debug info, and will be used to adjust FIRST_STACK_mask
1389 // after all call sites have been visited.
1390 if( _out_arg_limit < out_arg_limit_per_call)
1391 _out_arg_limit = out_arg_limit_per_call;
1392
1393 if (mcall) {
1394 // Kill the outgoing argument area, including any non-argument holes and
1395 // any legacy C-killed slots. Use Fat-Projections to do the killing.
1396 // Since the max-per-method covers the max-per-call-site and debug info
1397 // is excluded on the max-per-method basis, debug info cannot land in
1398 // this killed area.
1399 uint r_cnt = mcall->tf()->range_sig()->cnt();
1400 MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::EMPTY, MachProjNode::fat_proj );
1401 for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++) {
1402 proj->_rout.insert(OptoReg::Name(i));
1403 }
1404 if (!proj->_rout.is_empty()) {
1405 push_projection(proj);
1406 }
1407 }
1408 // Transfer the safepoint information from the call to the mcall
1409 // Move the JVMState list
1410 msfpt->set_jvms(sfpt->jvms());
1411 for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1412 jvms->set_map(sfpt);
1413 }
1414
1415 // Debug inputs begin just after the last incoming parameter
1416 assert((mcall == nullptr) || (mcall->jvms() == nullptr) ||
1417 (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain_cc()->cnt()), "");
1418
1419 // Add additional edges.
1420 if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
1421 // For these calls we cannot add MachConstantBase in expand(), as the
1422 // ins are not complete then.
1423 msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1424 if (msfpt->jvms() &&
1425 msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1426 // We added an edge before jvms, so we must adapt the position of the ins.
1427 msfpt->jvms()->adapt_position(+1);
1428 }
1429 }
1430
1431 // Registers killed by the call are set in the local scheduling pass
1432 // of Global Code Motion.
1433 return msfpt;
1434 }
1435
1436 //---------------------------match_tree----------------------------------------
1437 // Match an Ideal Node DAG - turn it into a tree; Label & Reduce. Used as part
1438 // of the wholesale conversion from Ideal to Mach Nodes. Also used for
1439 // making GotoNodes while building the CFG and in init_spill_mask() to identify
1440 // a Load's result RegMask for memoization in idealreg2regmask[].
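// Matching proceeds in two phases: Label_Root walks the ideal subtree and runs
// the ADLC-generated DFA to record, per State, the cheapest rule for every
// machine operand or instruction the node could reduce to; ReduceInst then
// rebuilds the tree bottom-up as MachNodes using the minimum-cost rule found
// at the root State.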
1441 MachNode *Matcher::match_tree( const Node *n ) {
1442 assert( n->Opcode() != Op_Phi, "cannot match" );
1443 assert( !n->is_block_start(), "cannot match" );
1444 // Set the mark for all locally allocated State objects.
1445 // When this call returns, the _states_arena will be reset,
1446 // freeing all State objects.
1447 ResourceMark rm( &_states_arena );
1448
1449 LabelRootDepth = 0;
1450
1451 // StoreNodes require their Memory input to match any LoadNodes
1452 Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1453 #ifdef ASSERT
1454 Node* save_mem_node = _mem_node;
1455 _mem_node = n->is_Store() ? (Node*)n : nullptr;
1456 #endif
1457 // State object for root node of match tree
1458 // Allocate it on _states_arena - stack allocation can cause stack overflow.
1459 State *s = new (&_states_arena) State;
1460 s->_kids[0] = nullptr;
1461 s->_kids[1] = nullptr;
1462 s->_leaf = (Node*)n;
1463 // Label the input tree, allocating labels from top-level arena
1464 Node* root_mem = mem;
1465 Label_Root(n, s, n->in(0), root_mem);
1466 if (C->failing()) return nullptr;
1467
1468 // The minimum cost match for the whole tree is found at the root State
1469 uint mincost = max_juint;
1470 uint cost = max_juint;
1471 uint i;
1472 for (i = 0; i < NUM_OPERANDS; i++) {
1473 if (s->valid(i) && // valid entry and
1474 s->cost(i) < cost && // low cost and
1475 s->rule(i) >= NUM_OPERANDS) {// not an operand
1476 mincost = i;
1477 cost = s->cost(i);
1478 }
1479 }
1480 if (mincost == max_juint) {
1481 #ifndef PRODUCT
1482 tty->print("No matching rule for:");
1483 s->dump();
1484 #endif
1485 Matcher::soft_match_failure();
1486 return nullptr;
1487 }
1488 // Reduce input tree based upon the state labels to machine Nodes
1489 MachNode *m = ReduceInst(s, s->rule(mincost), mem);
1490 // New-to-old mapping is done in ReduceInst, to cover complex instructions.
1491 NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
1492
1493 // Add any Matcher-ignored edges
1494 uint cnt = n->req();
1495 uint start = 1;
1496 if( mem != (Node*)1 ) start = MemNode::Memory+1;
1497 if( n->is_AddP() ) {
1498 assert( mem == (Node*)1, "" );
1499 start = AddPNode::Base+1;
1500 }
1501 for( i = start; i < cnt; i++ ) {
1502 if( !n->match_edge(i) ) {
1503 if( i < m->req() )
1504 m->ins_req( i, n->in(i) );
1505 else
1506 m->add_req( n->in(i) );
1507 }
1508 }
1509
1510 DEBUG_ONLY( _mem_node = save_mem_node; )
1511 return m;
1512 }
1513
1514
1515 //------------------------------match_into_reg---------------------------------
1516 // Choose to either match this Node in a register or part of the current
1517 // match tree. Return true for requiring a register and false for matching
1518 // as part of the current match tree.
1519 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1520
1521 const Type *t = m->bottom_type();
1522
1523 if (t->singleton()) {
1524 // Never force constants into registers. Allow them to match as
1525 // constants or registers. Copies of the same value will share
1526 // the same register. See find_shared_node.
1527 return false;
1528 } else { // Not a constant
1529 if (!shared && Matcher::is_encode_and_store_pattern(n, m)) {
1530 // Make it possible to match "encode and store" patterns with non-shared
1531 // encode operations that are pinned to a control node (e.g. by CastPP
1532 // node removal in final graph reshaping). The matched instruction cannot
1533 // float above the encode's control node because it is pinned to the
1534 // store's control node.
1535 return false;
1536 }
1537 // Stop recursion if they have different Controls.
1538 Node* m_control = m->in(0);
1539 // Control of load's memory can post-dominate load's control.
1540 // So use it since load can't float above its memory.
1541 Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr;
1542 if (control && m_control && control != m_control && control != mem_control) {
1543
1544 // Actually, we can live with the most conservative control we
1545 // find, if it post-dominates the others. This allows us to
1546 // pick up load/op/store trees where the load can float a little
1547 // above the store.
1548 Node *x = control;
1549 const uint max_scan = 6; // Arbitrary scan cutoff
1550 uint j;
1551 for (j=0; j<max_scan; j++) {
1552 if (x->is_Region()) // Bail out at merge points
1553 return true;
1554 x = x->in(0);
1555 if (x == m_control) // Does 'control' post-dominate
1556 break; // m->in(0)? If so, we can use it
1557 if (x == mem_control) // Does 'control' post-dominate
1558 break; // mem_control? If so, we can use it
1559 }
1560 if (j == max_scan) // No post-domination before scan end?
1561 return true; // Then break the match tree up
1562 }
1563 if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1564 (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1565 // These are commonly used in address expressions and can
1566 // efficiently fold into them on X64 in some cases.
1567 return false;
1568 }
1569 }
1570
1571 // Not forceable cloning. If shared, put it into a register.
1572 return shared;
1573 }
1574
1575
1576 //------------------------------Instruction Selection--------------------------
1577 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1578 // ideal nodes to machine instructions. Trees are delimited by shared Nodes,
1579 // things the Matcher does not match (e.g., Memory), and things with different
1580 // Controls (hence forced into different blocks). We pass in the Control
1581 // selected for this entire State tree.
1582
1583 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1584 // Store and the Load must have identical Memories (as well as identical
1585 // pointers). Since the Matcher does not have anything for Memory (and
1586 // does not handle DAGs), I have to match the Memory input myself. If the
1587 // Tree root is a Store or if there are multiple Loads in the tree, I require
1588 // all Loads to have the identical memory.
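// Schematically (a sketch), an add-to-memory candidate looks like
//   StoreI(mem, adr, AddI(LoadI(mem, adr), val))
// and can only become a single instruction when the Load sees the same 'mem'
// as the Store; that memory condition is what the checks below enforce.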
1589 Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
1590 // Since Label_Root is a recursive function, it's possible that we might run
1591 // out of stack space. See bugs 6272980 & 6227033 for more info.
1592 LabelRootDepth++;
1593 if (LabelRootDepth > MaxLabelRootDepth) {
1594 // Bailout. Can, for example, be hit with a deep chain of operations.
1595 C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1596 return nullptr;
1597 }
1598 uint care = 0; // Edges matcher cares about
1599 uint cnt = n->req();
1600 uint i = 0;
1601
1602 // Examine children for memory state
1603 // Can only subsume a child into your match-tree if that child's memory state
1604 // is not modified along the path to another input.
1605 // It is unsafe even if the other inputs are separate roots.
1606 Node *input_mem = nullptr;
1607 for( i = 1; i < cnt; i++ ) {
1608 if( !n->match_edge(i) ) continue;
1609 Node *m = n->in(i); // Get ith input
1610 assert( m, "expect non-null children" );
1611 if( m->is_Load() ) {
1612 if( input_mem == nullptr ) {
1613 input_mem = m->in(MemNode::Memory);
1614 if (mem == (Node*)1) {
1615 // Save this memory to bail out if there's another memory access
1616 // to a different memory location in the same tree.
1617 mem = input_mem;
1618 }
1619 } else if( input_mem != m->in(MemNode::Memory) ) {
1620 input_mem = NodeSentinel;
1621 }
1622 }
1623 }
1624
1625 for( i = 1; i < cnt; i++ ){// For my children
1626 if( !n->match_edge(i) ) continue;
1627 Node *m = n->in(i); // Get ith input
1628 // Allocate states out of a private arena
1629 State *s = new (&_states_arena) State;
1630 svec->_kids[care++] = s;
1631 assert( care <= 2, "binary only for now" );
1632
1633 // Recursively label the State tree.
1634 s->_kids[0] = nullptr;
1635 s->_kids[1] = nullptr;
1636 s->_leaf = m;
1637
1638 // Check for leaves of the State Tree; things that cannot be a part of
1639 // the current tree. If it finds any, that value is matched as a
1640 // register operand. If not, then the normal matching is used.
1641 if( match_into_reg(n, m, control, i, is_shared(m)) ||
1642 // Stop recursion if this is a LoadNode and there is another memory access
1643 // to a different memory location in the same tree (for example, a StoreNode
1644 // at the root of this tree or another LoadNode in one of the children).
1645 ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1646 // Can NOT include the match of a subtree when its memory state
1647 // is used by any of the other subtrees
1648 (input_mem == NodeSentinel) ) {
1649 // Print when we exclude matching due to different memory states at input-loads
1650 if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1651 && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1652 tty->print_cr("invalid input_mem");
1653 }
1654 // Switch to a register-only opcode; this value must be in a register
1655 // and cannot be subsumed as part of a larger instruction.
1656 s->DFA( m->ideal_reg(), m );
1657
1658 } else {
1659 // If match tree has no control and we do, adopt it for entire tree
1660 if( control == nullptr && m->in(0) != nullptr && m->req() > 1 )
1661 control = m->in(0); // Pick up control
1662 // Else match as a normal part of the match tree.
1663 control = Label_Root(m, s, control, mem);
1664 if (C->failing()) return nullptr;
1665 }
1666 }
1667
1668 // Call DFA to match this node, and return
1669 svec->DFA( n->Opcode(), n );
1670
1671 uint x;
1672 for( x = 0; x < _LAST_MACH_OPER; x++ )
1673 if( svec->valid(x) )
1674 break;
1675
1676 if (x >= _LAST_MACH_OPER) {
1677 #ifdef ASSERT
1678 n->dump();
1679 svec->dump();
1680 #endif
1681 assert( false, "bad AD file" );
1682 C->record_failure("bad AD file");
1683 }
1684 return control;
1685 }
1686
1687
1688 // Con nodes reduced using the same rule can share their MachNode
1689 // which reduces the number of copies of a constant in the final
1690 // program. The register allocator is free to split uses later to
1691 // split live ranges.
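// For example, if the same ideal constant leaf is reduced by the same rule in
// several match trees, every reduction after the first simply returns the
// MachNode built the first time instead of creating a fresh copy.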
1692 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1693 if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr;
1694
1695 // See if this Con has already been reduced using this rule.
1696 if (_shared_nodes.max() <= leaf->_idx) return nullptr;
1697 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1698 if (last != nullptr && rule == last->rule()) {
1699 // Don't expect control change for DecodeN
1700 if (leaf->is_DecodeNarrowPtr())
1701 return last;
1702 // Get the new space root.
1703 Node* xroot = new_node(C->root());
1704 if (xroot == nullptr) {
1705 // This shouldn't happen given the order of matching.
1706 return nullptr;
1707 }
1708
1709 // Shared constants need to have their control be root so they
1710 // can be scheduled properly.
1711 Node* control = last->in(0);
1712 if (control != xroot) {
1713 if (control == nullptr || control == C->root()) {
1714 last->set_req(0, xroot);
1715 } else {
1716 assert(false, "unexpected control");
1717 return nullptr;
1718 }
1719 }
1720 return last;
1721 }
1722 return nullptr;
1723 }
1724
1725
1726 //------------------------------ReduceInst-------------------------------------
1727 // Reduce a State tree (with given Control) into a tree of MachNodes.
1728 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1729 // complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1730 // Each MachNode has a number of complicated MachOper operands; each
1731 // MachOper also covers a further tree of Ideal Nodes.
1732
1733 // The root of the Ideal match tree is always an instruction, so we enter
1734 // the recursion here. After building the MachNode, we need to recurse
1735 // the tree checking for these cases:
1736 // (1) Child is an instruction -
1737 // Build the instruction (recursively), add it as an edge.
1738 // Build a simple operand (register) to hold the result of the instruction.
1739 // (2) Child is an interior part of an instruction -
1740 // Skip over it (do nothing)
1741 // (3) Child is the start of an operand -
1742 // Build the operand, place it inside the instruction
1743 // Call ReduceOper.
1744 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1745 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1746
1747 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1748 if (shared_node != nullptr) {
1749 return shared_node;
1750 }
1751
1752 // Build the object to represent this state & prepare for recursive calls
1753 MachNode *mach = s->MachNodeGenerator(rule);
1754 guarantee(mach != nullptr, "Missing MachNode");
1755 mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1756 assert( mach->_opnds[0] != nullptr, "Missing result operand" );
1757 Node *leaf = s->_leaf;
1758 NOT_PRODUCT(record_new2old(mach, leaf);)
1759 // Check for instruction or instruction chain rule
1760 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1761 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1762 "duplicating node that's already been matched");
1763 // Instruction
1764 mach->add_req( leaf->in(0) ); // Set initial control
1765 // Reduce interior of complex instruction
1766 ReduceInst_Interior( s, rule, mem, mach, 1 );
1767 } else {
1768 // Instruction chain rules are data-dependent on their inputs
1769 mach->add_req(nullptr); // Set initial control to none
1770 ReduceInst_Chain_Rule( s, rule, mem, mach );
1771 }
1772
1773 // If a Memory was used, insert a Memory edge
1774 if( mem != (Node*)1 ) {
1775 mach->ins_req(MemNode::Memory,mem);
1776 #ifdef ASSERT
1777 // Verify adr type after matching memory operation
1778 const MachOper* oper = mach->memory_operand();
1779 if (oper != nullptr && oper != (MachOper*)-1) {
1780 // It has a unique memory operand. Find corresponding ideal mem node.
1781 Node* m = nullptr;
1782 if (leaf->is_Mem()) {
1783 m = leaf;
1784 } else {
1785 m = _mem_node;
1786 assert(m != nullptr && m->is_Mem(), "expecting memory node");
1787 }
1788 const Type* mach_at = mach->adr_type();
1789 // DecodeN node consumed by an address may have different type
1790 // than its input. Don't compare types for such case.
1791 if (m->adr_type() != mach_at &&
1792 (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1793 (m->in(MemNode::Address)->is_AddP() &&
1794 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1795 (m->in(MemNode::Address)->is_AddP() &&
1796 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1797 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1798 mach_at = m->adr_type();
1799 }
1800 if (m->adr_type() != mach_at) {
1801 m->dump();
1802 tty->print_cr("mach:");
1803 mach->dump(1);
1804 }
1805 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1806 }
1807 #endif
1808 }
1809
1810 // If the _leaf is an AddP, insert the base edge
1811 if (leaf->is_AddP()) {
1812 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1813 }
1814
1815 uint number_of_projections_prior = number_of_projections();
1816
1817 // Perform any 1-to-many expansions required
1818 MachNode *ex = mach->Expand(s, _projection_list, mem);
1819 if (ex != mach) {
1820 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1821 if( ex->in(1)->is_Con() )
1822 ex->in(1)->set_req(0, C->root());
1823 // Remove old node from the graph
1824 for( uint i=0; i<mach->req(); i++ ) {
1825 mach->set_req(i,nullptr);
1826 }
1827 NOT_PRODUCT(record_new2old(ex, s->_leaf);)
1828 }
1829
1830 // PhaseChaitin::fixup_spills will sometimes generate spill code
1831 // via the matcher. By this time, nodes have been wired into the CFG,
1832 // and any further nodes generated by expand rules will be left hanging
1833 // in space, and will not get emitted as output code. Catch this.
1834 // Also, catch any new register allocation constraints ("projections")
1835 // generated belatedly during spill code generation.
1836 if (_allocation_started) {
1837 guarantee(ex == mach, "no expand rules during spill generation");
1838 guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1839 }
1840
1841 if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1842 // Record the con for sharing
1843 _shared_nodes.map(leaf->_idx, ex);
1844 }
1845
1846 // Have mach nodes inherit GC barrier data
1847 mach->set_barrier_data(MemNode::barrier_data(leaf));
1848
1849 return ex;
1850 }
1851
1852 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1853 for (uint i = n->req(); i < n->len(); i++) {
1854 if (n->in(i) != nullptr) {
1855 mach->add_prec(n->in(i));
1856 }
1857 }
1858 }
1859
1860 void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
1861 // 'op' is what I am expecting to receive
1862 int op = _leftOp[rule];
1863 // Operand type to catch the child's result
1864 // This is what my child will give me.
1865 unsigned int opnd_class_instance = s->rule(op);
1866 // Choose between operand class or not.
1867 // This is what I will receive.
1868 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1869 // New rule for child. Chase operand classes to get the actual rule.
1870 unsigned int newrule = s->rule(catch_op);
1871
1872 if (newrule < NUM_OPERANDS) {
1873 // Chain from operand or operand class, may be output of shared node
1874 assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
1875 // Insert operand into array of operands for this instruction
1876 mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1877
1878 ReduceOper(s, newrule, mem, mach);
1879 } else {
1880 // Chain from the result of an instruction
1881 assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1882 mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1883 Node *mem1 = (Node*)1;
1884 DEBUG_ONLY(Node *save_mem_node = _mem_node;)
1885 mach->add_req( ReduceInst(s, newrule, mem1) );
1886 DEBUG_ONLY(_mem_node = save_mem_node;)
1887 }
1888 return;
1889 }
1890
1891
1892 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1893 handle_precedence_edges(s->_leaf, mach);
1894
1895 if( s->_leaf->is_Load() ) {
1896 Node *mem2 = s->_leaf->in(MemNode::Memory);
1897 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1898 DEBUG_ONLY( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1899 mem = mem2;
1900 }
1901 if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
1902 if( mach->in(0) == nullptr )
1903 mach->set_req(0, s->_leaf->in(0));
1904 }
1905
1906 // Now recursively walk the state tree & add operand list.
1907 for( uint i=0; i<2; i++ ) { // binary tree
1908 State *newstate = s->_kids[i];
1909 if( newstate == nullptr ) break; // Might only have 1 child
1910 // 'op' is what I am expecting to receive
1911 int op;
1912 if( i == 0 ) {
1913 op = _leftOp[rule];
1914 } else {
1915 op = _rightOp[rule];
1916 }
1917 // Operand type to catch the child's result
1918 // This is what my child will give me.
1919 int opnd_class_instance = newstate->rule(op);
1920 // Choose between operand class or not.
1921 // This is what I will receive.
1922 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1923 // New rule for child. Chase operand classes to get the actual rule.
1924 int newrule = newstate->rule(catch_op);
1925
1926 if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
1927 // Operand/operandClass
1928 // Insert operand into array of operands for this instruction
1929 mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
1930 ReduceOper(newstate, newrule, mem, mach);
1931
1932 } else { // Child is internal operand or new instruction
1933 if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
1934 // internal operand --> call ReduceInst_Interior
1935 // Interior of complex instruction. Do nothing but recurse.
1936 num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
1937 } else {
1938 // instruction --> call build operand( ) to catch result
1939 // --> ReduceInst( newrule )
1940 mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
1941 Node *mem1 = (Node*)1;
1942 DEBUG_ONLY(Node *save_mem_node = _mem_node;)
1943 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1944 DEBUG_ONLY(_mem_node = save_mem_node;)
1945 }
1946 }
1947 assert( mach->_opnds[num_opnds-1], "" );
1948 }
1949 return num_opnds;
1950 }
1951
1952 // This routine walks the interior of possible complex operands.
1953 // At each point we check our children in the match tree:
1954 // (1) No children -
1955 // We are a leaf; add _leaf field as an input to the MachNode
1956 // (2) Child is an internal operand -
1957 // Skip over it ( do nothing )
1958 // (3) Child is an instruction -
1959 // Call ReduceInst recursively and add the resulting
1960 // instruction as an input to the MachNode
1961 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1962 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1963 State *kid = s->_kids[0];
1964 assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" );
1965
1966 // Leaf? And not subsumed?
1967 if( kid == nullptr && !_swallowed[rule] ) {
1968 mach->add_req( s->_leaf ); // Add leaf pointer
1969 return; // Bail out
1970 }
1971
1972 if( s->_leaf->is_Load() ) {
1973 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1974 mem = s->_leaf->in(MemNode::Memory);
1975 DEBUG_ONLY(_mem_node = s->_leaf;)
1976 }
1977
1978 handle_precedence_edges(s->_leaf, mach);
1979
1980 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1981 if( !mach->in(0) )
1982 mach->set_req(0,s->_leaf->in(0));
1983 else {
1984 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1985 }
1986 }
1987
1988 for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) { // binary tree
1989 int newrule;
1990 if( i == 0) {
1991 newrule = kid->rule(_leftOp[rule]);
1992 } else {
1993 newrule = kid->rule(_rightOp[rule]);
1994 }
1995
1996 if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
1997 // Internal operand; recurse but do nothing else
1998 ReduceOper(kid, newrule, mem, mach);
1999
2000 } else { // Child is a new instruction
2001 // Reduce the instruction, and add a direct pointer from this
2002 // machine instruction to the newly reduced one.
2003 Node *mem1 = (Node*)1;
2004 DEBUG_ONLY(Node *save_mem_node = _mem_node;)
2005 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
2006 DEBUG_ONLY(_mem_node = save_mem_node;)
2007 }
2008 }
2009 }
2010
2011
2012 // -------------------------------------------------------------------------
2013 // Java-Java calling convention
2014 // (what you use when Java calls Java)
2015
2016 //------------------------------find_receiver----------------------------------
2017 // For a given signature, return the OptoReg for parameter 0.
2018 OptoReg::Name Matcher::find_receiver() {
2019 VMRegPair regs;
2020 BasicType sig_bt = T_OBJECT;
2021 SharedRuntime::java_calling_convention(&sig_bt, ®s, 1);
2022 // Return argument 0 register. In the LP64 build pointers
2023 // take 2 registers, but the VM wants only the 'main' name.
2024 return OptoReg::as_OptoReg(regs.first());
2025 }
2026
2027 bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
2028 if (n != nullptr && m != nullptr) {
2029 return VectorNode::is_vector_shift(n) &&
2030 VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
2031 }
2032 return false;
2033 }
2034
2035 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
2036 // Must clone all producers of flags, or we will not match correctly.
2037 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2038 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
2039 // are also there, so we may match a float-branch to int-flags and
2040 // expect the allocator to haul the flags from the int-side to the
2041 // fp-side. No can do.
2042 if (_must_clone[m->Opcode()]) {
2043 mstack.push(m, Visit);
2044 return true;
2045 }
2046 return pd_clone_node(n, m, mstack);
2047 }
2048
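// Clone an AddP whose Offset input is a constant so that the base+offset
// expression can fold into each memory access's addressing mode (for example
// as [base + #imm]) instead of being forced into a separate register.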
2049 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2050 Node *off = m->in(AddPNode::Offset);
2051 if (off->is_Con()) {
2052 address_visited.test_set(m->_idx); // Flag as address_visited
2053 mstack.push(m->in(AddPNode::Address), Pre_Visit);
2054 // Clone X+offset as it also folds into most addressing expressions
2055 mstack.push(off, Visit);
2056 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2057 return true;
2058 }
2059 return false;
2060 }
2061
2062 // A method-klass-holder may be passed in the inline_cache_reg
2063 // and then expanded into the inline_cache_reg and a method_ptr register
2064 // defined in ad_<arch>.cpp
2065
2066 //------------------------------find_shared------------------------------------
2067 // Set bits if Node is shared or otherwise a root
2068 void Matcher::find_shared(Node* n) {
2069 // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2070 MStack mstack(C->live_nodes() * 2);
2071 // Mark nodes as address_visited if they are inputs to an address expression
2072 VectorSet address_visited;
2073 mstack.push(n, Visit); // Don't need to pre-visit root node
2074 while (mstack.is_nonempty()) {
2075 n = mstack.node(); // Leave node on stack
2076 Node_State nstate = mstack.state();
2077 uint nop = n->Opcode();
2078 if (nstate == Pre_Visit) {
2079 if (address_visited.test(n->_idx)) { // Visited in address already?
2080 // Flag as visited and shared now.
2081 set_visited(n);
2082 }
2083 if (is_visited(n)) { // Visited already?
2084 // Node is shared and has no reason to clone. Flag it as shared.
2085 // This causes it to match into a register for the sharing.
2086 set_shared(n); // Flag as shared and
2087 if (n->is_DecodeNarrowPtr()) {
2088 // Oop field/array element loads must be shared but since
2089 // they are shared through a DecodeN they may appear to have
2090 // a single use so force sharing here.
2091 set_shared(n->in(1));
2092 }
2093 mstack.pop(); // remove node from stack
2094 continue;
2095 }
2096 nstate = Visit; // Not already visited; so visit now
2097 }
2098 if (nstate == Visit) {
2099 mstack.set_state(Post_Visit);
2100 set_visited(n); // Flag as visited now
2101 bool mem_op = false;
2102 int mem_addr_idx = MemNode::Address;
2103 if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
2104 continue;
2105 }
2106 for (int i = n->len() - 1; i >= 0; --i) { // For my children
2107 Node* m = n->in(i); // Get ith input
2108 if (m == nullptr) {
2109 continue; // Ignore nulls
2110 }
2111 if (clone_node(n, m, mstack)) {
2112 continue;
2113 }
2114
2115 // Clone addressing expressions as they are "free" in memory access instructions
2116 if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2117 // When there are other uses besides address expressions
2118 // put it on stack and mark as shared.
2119 !is_visited(m)) {
2120 // Some inputs for address expression are not put on stack
2121 // to avoid marking them as shared and forcing them into register
2122 // if they are used only in address expressions.
2123 // But they should be marked as shared if there are other uses
2124 // besides address expressions.
2125
2126 if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2127 continue;
2128 }
2129 } // if( mem_op &&
2130 mstack.push(m, Pre_Visit);
2131 } // for(int i = ...)
2132 }
2133 else if (nstate == Alt_Post_Visit) {
2134 mstack.pop(); // Remove node from stack
2135 // We cannot remove the Cmp input from the Bool here, as the Bool may be
2136 // shared and all users of the Bool need to move the Cmp in parallel.
2137 // This leaves both the Bool and the If pointing at the Cmp. To
2138 // prevent the Matcher from trying to Match the Cmp along both paths
2139 // BoolNode::match_edge always returns a zero.
2140
2141 // We reorder the Op_If in a pre-order manner, so we can visit without
2142 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2143 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2144 }
2145 else if (nstate == Post_Visit) {
2146 mstack.pop(); // Remove node from stack
2147
2148 // Now hack a few special opcodes
2149 uint opcode = n->Opcode();
2150 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2151 if (!gc_handled) {
2152 find_shared_post_visit(n, opcode);
2153 }
2154 }
2155 else {
2156 ShouldNotReachHere();
2157 }
2158 } // end of while (mstack.is_nonempty())
2159 }
2160
2161 bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2162 switch(opcode) { // Handle some opcodes special
2163 case Op_Phi: // Treat Phis as shared roots
2164 case Op_Parm:
2165 case Op_Proj: // All handled specially during matching
2166 case Op_SafePointScalarObject:
2167 set_shared(n);
2168 set_dontcare(n);
2169 break;
2170 case Op_If:
2171 case Op_CountedLoopEnd:
2172 mstack.set_state(Alt_Post_Visit); // Alternative way
2173 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
2174 // with matching cmp/branch in 1 instruction. The Matcher needs the
2175 // Bool and CmpX side-by-side, because it can only get at constants
2176 // that are at the leaves of Match trees, and the Bool's condition acts
2177 // as a constant here.
2178 mstack.push(n->in(1), Visit); // Clone the Bool
2179 mstack.push(n->in(0), Pre_Visit); // Visit control input
2180 return true; // while (mstack.is_nonempty())
2181 case Op_ConvI2D: // These forms efficiently match with a prior
2182 case Op_ConvI2F: // Load but not a following Store
2183 if( n->in(1)->is_Load() && // Prior load
2184 n->outcnt() == 1 && // Not already shared
2185 n->unique_out()->is_Store() ) // Following store
2186 set_shared(n); // Force it to be a root
2187 break;
2188 case Op_ReverseBytesI:
2189 case Op_ReverseBytesL:
2190 if( n->in(1)->is_Load() && // Prior load
2191 n->outcnt() == 1 ) // Not already shared
2192 set_shared(n); // Force it to be a root
2193 break;
2194 case Op_BoxLock: // Can't match until we get stack-regs in ADLC
2195 case Op_IfFalse:
2196 case Op_IfTrue:
2197 case Op_MachProj:
2198 case Op_MergeMem:
2199 case Op_Catch:
2200 case Op_CatchProj:
2201 case Op_CProj:
2202 case Op_JumpProj:
2203 case Op_JProj:
2204 case Op_NeverBranch:
2205 set_dontcare(n);
2206 break;
2207 case Op_Jump:
2208 mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
2209 mstack.push(n->in(0), Pre_Visit); // Visit Control input
2210 return true; // while (mstack.is_nonempty())
2211 case Op_StrComp:
2212 case Op_StrEquals:
2213 case Op_StrIndexOf:
2214 case Op_StrIndexOfChar:
2215 case Op_AryEq:
2216 case Op_VectorizedHashCode:
2217 case Op_CountPositives:
2218 case Op_StrInflatedCopy:
2219 case Op_StrCompressedCopy:
2220 case Op_EncodeISOArray:
2221 case Op_FmaD:
2222 case Op_FmaF:
2223 case Op_FmaHF:
2224 case Op_FmaVD:
2225 case Op_FmaVF:
2226 case Op_FmaVHF:
2227 case Op_MacroLogicV:
2228 case Op_VectorCmpMasked:
2229 case Op_CompressV:
2230 case Op_CompressM:
2231 case Op_ExpandV:
2232 case Op_VectorLoadMask:
2233 set_shared(n); // Force result into register (it will be anyways)
2234 break;
2235 case Op_ConP: { // Convert pointers above the centerline to NULL
2236 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2237 const TypePtr* tp = tn->type()->is_ptr();
2238 if (tp->_ptr == TypePtr::AnyNull) {
2239 tn->set_type(TypePtr::NULL_PTR);
2240 }
2241 break;
2242 }
2243 case Op_ConN: { // Convert narrow pointers above the centerline to NULL
2244 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2245 const TypePtr* tp = tn->type()->make_ptr();
2246 if (tp && tp->_ptr == TypePtr::AnyNull) {
2247 tn->set_type(TypeNarrowOop::NULL_PTR);
2248 }
2249 break;
2250 }
2251 case Op_Binary: // These are introduced in the Post_Visit state.
2252 ShouldNotReachHere();
2253 break;
2254 case Op_ClearArray:
2255 case Op_SafePoint:
2256 mem_op = true;
2257 break;
2258 default:
2259 if( n->is_Store() ) {
2260 // Do match stores, despite no ideal reg
2261 mem_op = true;
2262 break;
2263 }
2264 if( n->is_Mem() ) { // Loads and LoadStores
2265 mem_op = true;
2266 // Loads must be root of match tree due to prior load conflict
2267 if( C->subsume_loads() == false )
2268 set_shared(n);
2269 }
2270 // Fall into default case
2271 if( !n->ideal_reg() )
2272 set_dontcare(n); // Unmatchable Nodes
2273 } // end_switch
2274 return false;
2275 }
2276
2277 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2278 if (n->is_predicated_vector()) {
2279 // Restructure into binary trees for Matching.
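// For example, with req() == 4 the inputs (in1, in2, in3) become
// (Binary(in1, in2), in3); larger arities nest further Binary nodes so
// every node is left with at most two matchable data inputs.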
2280 if (n->req() == 4) {
2281 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2282 n->set_req(2, n->in(3));
2283 n->del_req(3);
2284 } else if (n->req() == 5) {
2285 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2286 n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2287 n->del_req(4);
2288 n->del_req(3);
2289 } else if (n->req() == 6) {
2290 Node* b3 = new BinaryNode(n->in(4), n->in(5));
2291 Node* b2 = new BinaryNode(n->in(3), b3);
2292 Node* b1 = new BinaryNode(n->in(2), b2);
2293 n->set_req(2, b1);
2294 n->del_req(5);
2295 n->del_req(4);
2296 n->del_req(3);
2297 }
2298 return;
2299 }
2300
2301 switch(opcode) { // Handle some opcodes special
2302 case Op_CompareAndExchangeB:
2303 case Op_CompareAndExchangeS:
2304 case Op_CompareAndExchangeI:
2305 case Op_CompareAndExchangeL:
2306 case Op_CompareAndExchangeP:
2307 case Op_CompareAndExchangeN:
2308 case Op_WeakCompareAndSwapB:
2309 case Op_WeakCompareAndSwapS:
2310 case Op_WeakCompareAndSwapI:
2311 case Op_WeakCompareAndSwapL:
2312 case Op_WeakCompareAndSwapP:
2313 case Op_WeakCompareAndSwapN:
2314 case Op_CompareAndSwapB:
2315 case Op_CompareAndSwapS:
2316 case Op_CompareAndSwapI:
2317 case Op_CompareAndSwapL:
2318 case Op_CompareAndSwapP:
2319 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
2320 Node* newval = n->in(MemNode::ValueIn);
2321 Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2322 Node* pair = new BinaryNode(oldval, newval);
2323 n->set_req(MemNode::ValueIn, pair);
2324 n->del_req(LoadStoreConditionalNode::ExpectedIn);
2325 break;
2326 }
2327 case Op_CMoveD: // Convert trinary to binary-tree
2328 case Op_CMoveF:
2329 case Op_CMoveI:
2330 case Op_CMoveL:
2331 case Op_CMoveN:
2332 case Op_CMoveP: {
2333 // Restructure into a binary tree for Matching. It's possible that
2334 // we could move this code up next to the graph reshaping for IfNodes
2335 // or vice-versa, but I do not want to debug this for Ladybird.
2336 // 10/2/2000 CNC.
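// In effect, CMove(Bool, val1, val2) becomes
// CMove(Binary(Bool, Cmp), Binary(val1, val2)), where Cmp is the Bool's
// own input, so the matcher only ever sees binary subtrees.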
2337 Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2338 n->set_req(1, pair1);
2339 Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2340 n->set_req(2, pair2);
2341 n->del_req(3);
2342 break;
2343 }
2344 case Op_MacroLogicV: {
2345 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2346 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2347 n->set_req(1, pair1);
2348 n->set_req(2, pair2);
2349 n->del_req(4);
2350 n->del_req(3);
2351 break;
2352 }
2353 case Op_StoreVectorMasked: {
2354 Node* pair = new BinaryNode(n->in(3), n->in(4));
2355 n->set_req(3, pair);
2356 n->del_req(4);
2357 break;
2358 }
2359 case Op_SelectFromTwoVector:
2360 case Op_LoopLimit: {
2361 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2362 n->set_req(1, pair1);
2363 n->set_req(2, n->in(3));
2364 n->del_req(3);
2365 break;
2366 }
2367 case Op_StrEquals:
2368 case Op_StrIndexOfChar: {
2369 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2370 n->set_req(2, pair1);
2371 n->set_req(3, n->in(4));
2372 n->del_req(4);
2373 break;
2374 }
2375 case Op_StrComp:
2376 case Op_StrIndexOf:
2377 case Op_VectorizedHashCode: {
2378 Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2379 n->set_req(2, pair1);
2380 Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2381 n->set_req(3, pair2);
2382 n->del_req(5);
2383 n->del_req(4);
2384 break;
2385 }
2386 case Op_EncodeISOArray:
2387 case Op_StrCompressedCopy:
2388 case Op_StrInflatedCopy: {
2389 // Restructure into a binary tree for Matching.
2390 Node* pair = new BinaryNode(n->in(3), n->in(4));
2391 n->set_req(3, pair);
2392 n->del_req(4);
2393 break;
2394 }
2395 case Op_FmaD:
2396 case Op_FmaF:
2397 case Op_FmaHF:
2398 case Op_FmaVD:
2399 case Op_FmaVF:
2400 case Op_FmaVHF: {
2401 // Restructure into a binary tree for Matching.
2402 Node* pair = new BinaryNode(n->in(1), n->in(2));
2403 n->set_req(2, pair);
2404 n->set_req(1, n->in(3));
2405 n->del_req(3);
2406 break;
2407 }
2408 case Op_MulAddS2I: {
2409 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2410 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2411 n->set_req(1, pair1);
2412 n->set_req(2, pair2);
2413 n->del_req(4);
2414 n->del_req(3);
2415 break;
2416 }
2417 case Op_ClearArray: {
2418 Node* pair = new BinaryNode(n->in(2), n->in(3));
2419 n->set_req(2, pair);
2420 n->set_req(3, n->in(4));
2421 n->del_req(4);
2422 break;
2423 }
2424 case Op_VectorCmpMasked:
2425 case Op_CopySignD:
2426 case Op_SignumVF:
2427 case Op_SignumVD:
2428 case Op_SignumF:
2429 case Op_SignumD: {
2430 Node* pair = new BinaryNode(n->in(2), n->in(3));
2431 n->set_req(2, pair);
2432 n->del_req(3);
2433 break;
2434 }
2435 case Op_VectorBlend:
2436 case Op_VectorInsert: {
2437 Node* pair = new BinaryNode(n->in(1), n->in(2));
2438 n->set_req(1, pair);
2439 n->set_req(2, n->in(3));
2440 n->del_req(3);
2441 break;
2442 }
2443 case Op_LoadVectorGatherMasked: // fall-through
2444 case Op_StoreVectorScatter: {
2445 Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2446 n->set_req(MemNode::ValueIn, pair);
2447 n->del_req(MemNode::ValueIn+1);
2448 break;
2449 }
2450 case Op_StoreVectorScatterMasked: {
2451 Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
2452 n->set_req(MemNode::ValueIn+1, pair);
2453 n->del_req(MemNode::ValueIn+2);
2454 pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2455 n->set_req(MemNode::ValueIn, pair);
2456 n->del_req(MemNode::ValueIn+1);
2457 break;
2458 }
2459 case Op_VectorMaskCmp: {
2460 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2461 n->set_req(2, n->in(3));
2462 n->del_req(3);
2463 break;
2464 }
2465 case Op_PartialSubtypeCheck: {
2466 if (UseSecondarySupersTable && n->in(2)->is_Con()) {
2467 // PartialSubtypeCheck uses both constant and register operands for superclass input.
2468 n->set_req(2, new BinaryNode(n->in(2), n->in(2)));
2469 break;
2470 }
2471 break;
2472 }
2473 case Op_StoreLSpecial: {
2474 if (n->req() > (MemNode::ValueIn + 1) && n->in(MemNode::ValueIn + 1) != nullptr) {
2475 Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn + 1));
2476 n->set_req(MemNode::ValueIn, pair);
2477 n->del_req(MemNode::ValueIn + 1);
2478 }
2479 break;
2480 }
2481 default:
2482 break;
2483 }
2484 }
2485
2486 #ifndef PRODUCT
2487 void Matcher::record_new2old(Node* newn, Node* old) {
2488 _new2old_map.map(newn->_idx, old);
2489 if (!_reused.test_set(old->_igv_idx)) {
2490 // Reuse the Ideal-level IGV identifier so that the node can be tracked
2491 // across matching. If there are multiple machine nodes expanded from the
2492 // same Ideal node, only one will reuse its IGV identifier.
2493 newn->_igv_idx = old->_igv_idx;
2494 }
2495 }
2496
2497 // machine-independent root to machine-dependent root
2498 void Matcher::dump_old2new_map() {
2499 _old2new_map.dump();
2500 }
2501 #endif // !PRODUCT
2502
2503 //---------------------------collect_null_checks-------------------------------
2504 // Find null checks in the ideal graph; write a machine-specific node for
2505 // it. Used by later implicit-null-check handling. Actually collects
2506 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2507 // value being tested.
2508 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2509 Node *iff = proj->in(0);
2510 if( iff->Opcode() == Op_If ) {
2511 // During matching If's have Bool & Cmp side-by-side
2512 BoolNode *b = iff->in(1)->as_Bool();
2513 Node *cmp = iff->in(2);
2514 int opc = cmp->Opcode();
2515 if (opc != Op_CmpP && opc != Op_CmpN) return;
2516
2517 const Type* ct = cmp->in(2)->bottom_type();
2518 if (ct == TypePtr::NULL_PTR ||
2519 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2520
2521 bool push_it = false;
2522 if( proj->Opcode() == Op_IfTrue ) {
2523 #ifndef PRODUCT
2524 extern uint all_null_checks_found;
2525 all_null_checks_found++;
2526 #endif
2527 if( b->_test._test == BoolTest::ne ) {
2528 push_it = true;
2529 }
2530 } else {
2531 assert( proj->Opcode() == Op_IfFalse, "" );
2532 if( b->_test._test == BoolTest::eq ) {
2533 push_it = true;
2534 }
2535 }
2536 if( push_it ) {
2537 _null_check_tests.push(proj);
2538 Node* val = cmp->in(1);
2539 #ifdef _LP64
2540 if (val->bottom_type()->isa_narrowoop() &&
2541 !Matcher::narrow_oop_use_complex_address()) {
2542 //
2543 // Look for DecodeN node which should be pinned to orig_proj.
2544 // On platforms (SPARC) which cannot handle 2 adds
2545 // in addressing mode we have to keep a DecodeN node and
2546 // use it to do implicit null check in address.
2547 //
2548 // DecodeN node was pinned to non-null path (orig_proj) during
2549 // CastPP transformation in final_graph_reshaping_impl().
2550 //
2551 uint cnt = orig_proj->outcnt();
2552 for (uint i = 0; i < orig_proj->outcnt(); i++) {
2553 Node* d = orig_proj->raw_out(i);
2554 if (d->is_DecodeN() && d->in(1) == val) {
2555 val = d;
2556 val->set_req(0, nullptr); // Unpin now.
2557 // Mark this as special case to distinguish from
2558 // a regular case: CmpP(DecodeN, null).
2559 val = (Node*)(((intptr_t)val) | 1);
2560 break;
2561 }
2562 }
2563 }
2564 #endif
2565 _null_check_tests.push(val);
2566 }
2567 }
2568 }
2569 }
2570
2571 //---------------------------validate_null_checks------------------------------
2572 // It's possible that the value being null checked is not the root of a match
2573 // tree. If so, I cannot use the value in an implicit null check.
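// The worklist holds (test-projection, tested-value) pairs pushed by
// collect_null_checks(), which is why the loop below steps by two and either
// rewrites the value to its matched machine node or drops the pair entirely.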
2574 void Matcher::validate_null_checks( ) {
2575 uint cnt = _null_check_tests.size();
2576 for( uint i=0; i < cnt; i+=2 ) {
2577 Node *test = _null_check_tests[i];
2578 Node *val = _null_check_tests[i+1];
2579 bool is_decoden = ((intptr_t)val) & 1;
2580 val = (Node*)(((intptr_t)val) & ~1);
2581 if (has_new_node(val)) {
2582 Node* new_val = new_node(val);
2583 if (is_decoden) {
2584 assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity");
2585 // Note: new_val may have a control edge if
2586 // the original ideal node DecodeN was matched before
2587 // it was unpinned in Matcher::collect_null_checks().
2588 // Unpin the mach node and mark it.
2589 new_val->set_req(0, nullptr);
2590 new_val = (Node*)(((intptr_t)new_val) | 1);
2591 }
2592 // Is a match-tree root, so replace with the matched value
2593 _null_check_tests.map(i+1, new_val);
2594 } else {
2595 // Yank from candidate list
2596 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2597 _null_check_tests.map(i,_null_check_tests[--cnt]);
2598 _null_check_tests.pop();
2599 _null_check_tests.pop();
2600 i-=2;
2601 }
2602 }
2603 }
2604
2605 bool Matcher::gen_narrow_oop_implicit_null_checks() {
2606 // Advise the matcher to perform null checks on the narrow oop side.
2607 // Implicit checks are not possible on the uncompressed oop side anyway
2608 // (at least not for read accesses).
2609 // Performs significantly better (especially on Power 6).
2610 if (!os::zero_page_read_protected()) {
2611 return true;
2612 }
2613 return CompressedOops::use_implicit_null_checks() &&
2614 (narrow_oop_use_complex_address() ||
2615 CompressedOops::base() != nullptr);
2616 }
2617
2618 // Compute RegMask for an ideal register.
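// For most ideal registers the mask is obtained indirectly: build a throw-away
// Load of the corresponding type, run it through match_tree(), and report the
// output register mask of the resulting machine node.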
2619 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2620 assert(!C->failing_internal() || C->failure_is_artificial(), "already failing.");
2621 if (C->failing()) {
2622 return nullptr;
2623 }
2624 const Type* t = Type::mreg2type[ideal_reg];
2625 if (t == nullptr) {
2626 assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2627 return nullptr; // not supported
2628 }
2629 Node* fp = ret->in(TypeFunc::FramePtr);
2630 Node* mem = ret->in(TypeFunc::Memory);
2631 const TypePtr* atp = TypePtr::BOTTOM;
2632 MemNode::MemOrd mo = MemNode::unordered;
2633
2634 Node* spill;
2635 switch (ideal_reg) {
2636 case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break;
2637 case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(), mo); break;
2638 case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(), mo); break;
2639 case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t, mo); break;
2640 case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t, mo); break;
2641 case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(), mo); break;
2642
2643 case Op_VecA: // fall-through
2644 case Op_VecS: // fall-through
2645 case Op_VecD: // fall-through
2646 case Op_VecX: // fall-through
2647 case Op_VecY: // fall-through
2648 case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break;
2649 case Op_RegVectMask: return Matcher::predicate_reg_mask();
2650
2651 default: ShouldNotReachHere();
2652 }
2653 MachNode* mspill = match_tree(spill);
2654 assert(mspill != nullptr || C->failure_is_artificial(), "matching failed: %d", ideal_reg);
2655 if (C->failing()) {
2656 return nullptr;
2657 }
2658 // Handle generic vector operand case
2659 if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2660 specialize_mach_node(mspill);
2661 }
2662 return &mspill->out_RegMask();
2663 }
2664
2665 // Process Mach IR right after selection phase is over.
2666 void Matcher::do_postselect_cleanup() {
2667 if (supports_generic_vector_operands) {
2668 specialize_generic_vector_operands();
2669 if (C->failing()) return;
2670 }
2671 }
2672
2673 //----------------------------------------------------------------------
2674 // Generic machine operands elision.
2675 //----------------------------------------------------------------------
2676
2677 // Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
2678 void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
2679 assert(use->in(idx) == tmp, "not a user");
2680 assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");
2681
2682 if ((uint)idx == use->two_adr()) { // DEF_TEMP case
2683 tmp->_opnds[0] = use->_opnds[0]->clone();
2684 } else {
2685 uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
2686 tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
2687 }
2688 }
2689
2690 // Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
2691 MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
2692 assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
2693 Node* def = nullptr;
2694 if (opnd_idx == 0) { // DEF
2695 def = m; // use mach node itself to compute vector operand type
2696 } else {
2697 int base_idx = m->operand_index(opnd_idx);
2698 def = m->in(base_idx);
2699 if (def->is_Mach()) {
2700 if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
2701 specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
2702 } else if (is_reg2reg_move(def->as_Mach())) {
2703 def = def->in(1); // skip over generic reg-to-reg moves
2704 }
2705 }
2706 }
2707 assert(def->bottom_type()->isa_vect(), "not a vector");
2708 uint ideal_vreg = def->bottom_type()->ideal_reg();
2709 return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
2710 }
2711
2712 void Matcher::specialize_mach_node(MachNode* m) {
2713 assert(!m->is_MachTemp(), "processed along with its user");
2714 // For generic use operands pull specific register class operands from
2715 // its def instruction's output operand (def operand).
2716 for (uint i = 0; i < m->num_opnds(); i++) {
2717 if (Matcher::is_generic_vector(m->_opnds[i])) {
2718 m->_opnds[i] = specialize_vector_operand(m, i);
2719 }
2720 }
2721 }
2722
2723 // Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
2724 void Matcher::specialize_generic_vector_operands() {
2725 assert(supports_generic_vector_operands, "sanity");
2726 ResourceMark rm;
2727
2728 // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
2729 // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
2730 Unique_Node_List live_nodes;
2731 C->identify_useful_nodes(live_nodes);
2732
2733 while (live_nodes.size() > 0) {
2734 MachNode* m = live_nodes.pop()->isa_Mach();
2735 if (m != nullptr) {
2736 if (Matcher::is_reg2reg_move(m)) {
2737 // Register allocator properly handles vec <=> leg moves using register masks.
2738 int opnd_idx = m->operand_index(1);
2739 Node* def = m->in(opnd_idx);
2740 m->subsume_by(def, C);
2741 } else if (m->is_MachTemp()) {
2742 // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
2743 } else {
2744 specialize_mach_node(m);
2745 }
2746 }
2747 }
2748 }
2749
2750 uint Matcher::vector_length(const Node* n) {
2751 const TypeVect* vt = n->bottom_type()->is_vect();
2752 return vt->length();
2753 }
2754
2755 uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
2756 int def_idx = use->operand_index(opnd);
2757 Node* def = use->in(def_idx);
2758 return def->bottom_type()->is_vect()->length();
2759 }
2760
2761 uint Matcher::vector_length_in_bytes(const Node* n) {
2762 const TypeVect* vt = n->bottom_type()->is_vect();
2763 return vt->length_in_bytes();
2764 }
2765
2766 uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
2767 uint def_idx = use->operand_index(opnd);
2768 Node* def = use->in(def_idx);
2769 return def->bottom_type()->is_vect()->length_in_bytes();
2770 }
2771
2772 BasicType Matcher::vector_element_basic_type(const Node* n) {
2773 const TypeVect* vt = n->bottom_type()->is_vect();
2774 return vt->element_basic_type();
2775 }
2776
2777 BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
2778 int def_idx = use->operand_index(opnd);
2779 Node* def = use->in(def_idx);
2780 return def->bottom_type()->is_vect()->element_basic_type();
2781 }
2782
2783 bool Matcher::is_non_long_integral_vector(const Node* n) {
2784 BasicType bt = vector_element_basic_type(n);
2785 assert(bt != T_CHAR, "char is not allowed in vector");
2786 return is_subword_type(bt) || bt == T_INT;
2787 }
2788
2789 bool Matcher::is_encode_and_store_pattern(const Node* n, const Node* m) {
2790 if (n == nullptr ||
2791 m == nullptr ||
2792 n->Opcode() != Op_StoreN ||
2793 !m->is_EncodeP() ||
2794 n->as_Store()->barrier_data() == 0) {
2795 return false;
2796 }
2797 assert(m == n->in(MemNode::ValueIn), "m should be input to n");
2798 return true;
2799 }
2800
2801 #ifdef ASSERT
2802 bool Matcher::verify_after_postselect_cleanup() {
2803 assert(!C->failing_internal() || C->failure_is_artificial(), "sanity");
2804 if (supports_generic_vector_operands) {
2805 Unique_Node_List useful;
2806 C->identify_useful_nodes(useful);
2807 for (uint i = 0; i < useful.size(); i++) {
2808 MachNode* m = useful.at(i)->isa_Mach();
2809 if (m != nullptr) {
2810 assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
2811 for (uint j = 0; j < m->num_opnds(); j++) {
2812 assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
2813 }
2814 }
2815 }
2816 }
2817 return true;
2818 }
2819 #endif // ASSERT
2820
2821 // Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2822 // atomic instruction acting as a store_load barrier without any
2823 // intervening volatile load, and thus we don't need a barrier here.
2824 // We retain the Node to act as a compiler ordering barrier.
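// For example, a MemBarVolatile whose control successor is a CompareAndSwap*
// (or another trailing barrier) is redundant and reports true here, whereas
// one followed by a volatile load (MemBarAcquire/LoadFence) must be kept.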
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = nullptr;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != nullptr), "missing control projection");

  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();

    // We don't need the current barrier if we see another barrier or a
    // lock before seeing a volatile load.
    //
    // Op_FastUnlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndExchangeB ||
        xop == Op_CompareAndExchangeS ||
        xop == Op_CompareAndExchangeI ||
        xop == Op_CompareAndExchangeL ||
        xop == Op_CompareAndExchangeP ||
        xop == Op_CompareAndExchangeN ||
        xop == Op_WeakCompareAndSwapB ||
        xop == Op_WeakCompareAndSwapS ||
        xop == Op_WeakCompareAndSwapL ||
        xop == Op_WeakCompareAndSwapP ||
        xop == Op_WeakCompareAndSwapN ||
        xop == Op_WeakCompareAndSwapI ||
        xop == Op_CompareAndSwapB ||
        xop == Op_CompareAndSwapS ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI ||
        BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    if (xop == Op_FastLock) {
      return true;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by an acquire membar.
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // probably not necessary to check for these
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}

// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch costs if the uncommon trap
// is actually taken. The code must be able to be recompiled to use a
// cheaper test.
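//
// Rough sketch of the shape being recognized:
//
//   If --> IfFalse --> (zero or more Regions) --> Call into the uncommon
//                                                 trap blob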
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = nullptr;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
               // Alternatively use visited set? Seems too expensive.
  while (reg != nullptr && cnt > 0) {
    CallNode *call = nullptr;
    RegionNode *nxt_reg = nullptr;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }

  return false;
}

//=============================================================================
//---------------------------State---------------------------------------------
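// In debug builds the constructor and destructor poison the kid pointers and
// the leaf with recognizable sentinel values (0xcafebabe..., 0xbaadf00d...),
// and the destructor additionally scribbles over the cost and rule arrays, so
// that use of an uninitialized or freed State is caught quickly.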
State::State(void) : _rule() {
#ifdef ASSERT
  _id = 0;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
#endif
}

#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

#ifndef PRODUCT
//---------------------------dump----------------------------------------------
void State::dump() {
  tty->print("\n");
  dump(0);
}

void State::dump(int depth) {
  for (int j = 0; j < depth; j++) {
    tty->print(" ");
  }
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for (i = 0; i < _LAST_MACH_OPER; i++) {
    // Check for valid entry
    if (valid(i)) {
      for (int j = 0; j < depth; j++) {
        tty->print(" ");
      }
      assert(cost(i) != max_juint, "cost must be a valid value");
      assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s %d %s",
                    ruleName[i], cost(i), ruleName[rule(i)] );
    }
  }
  tty->cr();

  for (i = 0; i < 2; i++) {
    if (_kids[i]) {
      _kids[i]->dump(depth + 1);
    }
  }
}
#endif