 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/type.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
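// Note: edges at index < req() are normal use-def inputs and are updated
// with set_req(); edges at index >= req() are precedence edges and are
// updated with set_prec(). Precedence edges are kept packed, so the scan
// can stop at the first null precedence edge.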
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == nullptr) {
      break;
    }
  }
  return nreplacements;
}

void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
  assert(old != nullptr, "sanity");
  for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
    Node* use = old->fast_out(i);
    _igvn.rehash_node_delayed(use);
    imax -= replace_input(use, old, target);
    // back up iterator
    --i;
  }
  assert(old->outcnt() == 0, "all uses must be deleted");
}

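// Emits, schematically,
//
//     if ((word & mask) != bits) { /* slow path */ } else { /* fast path */ }
//
// (or a direct test of 'word' when mask == 0). The outcome that is not
// returned is wired into region->in(edge); the returned projection becomes
// the caller's new control.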
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new IfNode(ctrl, bol, PROB_MIN, COUNT_UNKNOWN);
  transform_later(iff);

  // Fast path taken.
  Node *fast_taken = transform_later(new IfFalseNode(iff));

  // Fast path not taken, i.e. slow path.
  Node *slow_taken = transform_later(new IfTrueNode(iff));

  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}

void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node* ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, oldcall->in(TypeFunc::I_O));
  call->init_req(TypeFunc::Memory, oldcall->in(TypeFunc::Memory)); // may gc ptrs
  call->init_req(TypeFunc::ReturnAdr, oldcall->in(TypeFunc::ReturnAdr));
  call->init_req(TypeFunc::FramePtr, oldcall->in(TypeFunc::FramePtr));
}

CallNode* PhaseMacroExpand::make_slow_call(CallNode* oldcall, const TypeFunc* slow_call_type,
                                           address slow_call, const char* leaf_name, Node* slow_path,
                                           Node* parm0, Node* parm1, Node* parm2) {
  // Slow-path call
  CallNode *call = leaf_name
    ? (CallNode*)new CallLeafNode      (slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM)
    : (CallNode*)new CallStaticJavaNode(slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM);

  // Slow path call has no side effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call);
  if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
  call->copy_call_debug_info(&_igvn, oldcall);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
  BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->eliminate_gc_barrier(this, p2x);
#ifndef PRODUCT
  if (PrintOptoStatistics) {
    Atomic::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
  }
#endif
}

// Search for a memory operation for the specified memory slice.
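// Roughly: walk the memory graph backwards from 'mem' on the given alias
// slice until we hit one of our sentinels (the allocation's own memory
// projection or the start memory), a Store to the exact field in question,
// an Initialize/ClearArray covering it, or an ArrayCopy that writes it;
// everything that cannot modify this instance's field is skipped over.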
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // We can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode *call = in->as_Call();
        if (call->may_modify(tinst, phase)) {
          assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
          if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
            return in;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        ArrayCopyNode* ac = nullptr;
        if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
          if (ac != nullptr) {
            assert(ac->is_clonebasic(), "Only basic clone is a non-escaping clone");
            return ac;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else {
#ifdef ASSERT
        in->dump();
        mem->dump();
        assert(false, "unexpected projection");
#endif
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // Array element references have the same alias_idx
        // but different offset and different instance_id.
        if (adr_offset == offset && adr_iid == alloc->_idx) {
          return mem;
        }
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for a stored value; return the Initialize node
        // or the memory edge from the Allocate node.
        if (init != nullptr) {
          return init;
        } else {
          return alloc->in(TypeFunc::Memory); // It is not Initialize but allocation's memory projection
        }
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = nullptr;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_StrInflatedCopy) {
      Node* adr = mem->in(3); // Destination array
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}

// Generate loads from source of the arraycopy for fields of
// destination needed at a deoptimization point
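// For example, if a non-escaping object is initialized by cloning another
// object (Object.clone() lowered to an ArrayCopy), the field values live
// only in the clone source; to rematerialize the object at a safepoint we
// emit a load of src + offset on the deoptimization path instead.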
Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) {
  BasicType bt = ft;
  const Type *type = ftype;
  if (ft == T_NARROWOOP) {
    bt = T_OBJECT;
    type = ftype->make_oopptr();
  }
  Node* res = nullptr;
  if (ac->is_clonebasic()) {
    assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
    Node* base = ac->in(ArrayCopyNode::Src);
    Node* adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(offset)));
    const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
    MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
  } else {
    if (ac->modifies(offset, offset, &_igvn, true)) {
      assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
      uint shift = exact_log2(type2aelembytes(bt));
      Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
      Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
      const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
      const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();

      Node* adr = nullptr;
      const TypePtr* adr_type = nullptr;
      if (src_pos_t->is_con() && dest_pos_t->is_con()) {
        intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(off)));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Don't emit a new load from src if src == dst but try to get the value from memory instead
          return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
        }
      } else {
        Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
        diff = _igvn.transform(new ConvI2LNode(diff));
#endif
        diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));

        Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, off));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Non-constant offset in the array: we can't statically
          // determine the value
          return nullptr;
        }
      }
      MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
    }
  }
  if (res != nullptr) {
    if (ftype->isa_narrowoop()) {
      // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
      res = _igvn.transform(new EncodePNode(res, ftype));
    }
    return res;
  }
  return nullptr;
}

//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive; its depth is limited by the "level" argument.
// Returns the computed Phi, or null if it cannot compute it.
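// Schematically, a memory Phi merging two stores to the same field
//
//     Phi(mem) = Phi(Region, Store(p.f, 5), Store(p.f, 7))
//
// is translated into a value Phi over the stored values:
//
//     Phi(val) = Phi(Region, 5, 7)
//
// Self-referencing memory edges become self-referencing value edges, which
// is why inputs equal to 'mem' are wired back to the new Phi below.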
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
  Node* new_phi = value_phis->find(mem->_idx);
  if (new_phi != nullptr)
    return new_phi;

  if (level <= 0) {
    return nullptr; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");

  uint length = mem->req();
  GrowableArray <Node *> values(length, length, nullptr);

  // create a new Phi for the value
  PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == nullptr || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == nullptr) {
        return nullptr;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        Node* n = val->in(MemNode::ValueIn);
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        n = bs->step_over_gc_barrier(n);
        if (is_subword_type(ft)) {
          n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
        }
        values.at_put(j, n);
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == nullptr) {
          return nullptr;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() ||
               val->in(0)->Opcode() == Op_EncodeISOArray ||
               val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      } else if (val->is_ArrayCopy()) {
        Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
        if (res == nullptr) {
          return nullptr;
        }
        values.at_put(j, res);
      } else {
        DEBUG_ONLY( val->dump(); )
        assert(false, "unknown node on this path");
        return nullptr;  // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}

// Search for the last value stored into the object's field.
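// For a non-escaping allocation we can answer "what value does field f of
// instance p hold at this safepoint?" purely from the IR. E.g. for
//
//     p = new Point(); p.x = 5;  // safepoint here
//
// the walk below finds the store of 5 on the relevant memory slice and
// returns its stored value; hitting the allocation itself means the field
// still holds its default zero value.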
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  VectorSet visited;

  bool done = sfpt_mem == alloc_mem;
  Node *mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return nullptr;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == nullptr) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != nullptr, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find a phi's unique input
      Node *unique_input = nullptr;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == nullptr || n == top || n == mem) {
          continue;
        } else if (unique_input == nullptr) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != nullptr && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else if (mem->is_ArrayCopy()) {
      done = true;
    } else {
      DEBUG_ONLY( mem->dump(); )
      assert(false, "unexpected node");
    }
  }
  if (mem != nullptr) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      Node* n = mem->in(MemNode::ValueIn);
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);
      return n;
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(8);
      Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != nullptr) {
        return phi;
      } else {
        // Kill all new Phis
        while (value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    } else if (mem->is_ArrayCopy()) {
      Node* ctl = mem->in(0);
      Node* m = mem->in(TypeFunc::Memory);
      if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj()) {
        // pin the loads in the uncommon trap path
        ctl = sfpt_ctl;
        m = sfpt_mem;
      }
      return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
    }
  }
  // Something went wrong.
  return nullptr;
}

// Check the possibility of scalar replacement.
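// Typical disqualifiers checked below: the object is passed to a call,
// returned, referenced by a Phi that survives, or one of its fields is
// read by a Load/LoadStore that is still live; any of these means the
// object escapes the scope we can reason about here.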
bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
  // Scan the uses of the allocation to check for anything that would
  // prevent us from eliminating it.
  NOT_PRODUCT( const char* fail_eliminate = nullptr; )
  DEBUG_ONLY( Node* disq_node = nullptr; )
  bool can_eliminate = true;
  bool reduce_merge_precheck = (safepoints == nullptr);

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = nullptr;
  if (res == nullptr) {
    // All users were eliminated.
  } else if (!res->is_CheckCastPP()) {
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = igvn->type(res)->isa_oopptr();
    if (res_type == nullptr) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != nullptr) {
    BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = igvn->type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_ArrayCopy() &&
                 (use->as_ArrayCopy()->is_clonebasic() ||
                  use->as_ArrayCopy()->is_arraycopy_validated() ||
                  use->as_ArrayCopy()->is_copyof_validated() ||
                  use->as_ArrayCopy()->is_copyofrange_validated()) &&
                 use->in(ArrayCopyNode::Dest) == res) {
        // ok to eliminate
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == nullptr || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
          can_eliminate = false;
        } else if (!reduce_merge_precheck) {
          safepoints->append_if_missing(sfpt);
        }
      } else if (reduce_merge_precheck && (use->is_Phi() || use->is_EncodeP() || use->Opcode() == Op_MemBarRelease)) {
        // Nothing to do
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations && safepoints != nullptr) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != nullptr) {
        tty->print(" >>>> ");
        disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }

  if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
    tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
    DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
  }
#endif
  return can_eliminate;
}

void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray <SafePointNode *> safepoints_done, AllocateNode* alloc) {
  Node* res = alloc->result_cast();
  int nfields = 0;
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  if (res != nullptr) {
    const TypeOopPtr* res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      ciInstanceKlass* iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
    }
  }

  // rollback processed safepoints
  while (safepoints_done.length() > 0) {
    SafePointNode* sfpt_done = safepoints_done.pop();
    // remove any extra entries we added to the safepoint
    uint last = sfpt_done->req() - 1;
    for (int k = 0; k < nfields; k++) {
      sfpt_done->del_req(last--);
    }
    JVMState *jvms = sfpt_done->jvms();
    jvms->set_endoff(sfpt_done->req());
    // Now make a pass over the debug information replacing any references
    // to SafePointScalarObjectNode with the allocated object.
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    for (int i = start; i < end; i++) {
      if (sfpt_done->in(i)->is_SafePointScalarObject()) {
        SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
        if (scobj->first_index(jvms) == sfpt_done->req() &&
            scobj->n_fields() == (uint)nfields) {
          assert(scobj->alloc() == alloc, "sanity");
          sfpt_done->set_req(i, res);
        }
      }
    }
    _igvn._worklist.push(sfpt_done);
  }
}

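// A SafePointScalarObjectNode stands in for the eliminated allocation in
// the debug info: it records the object's type and how many of the
// following safepoint inputs are its field values. At deoptimization the
// runtime reads this description to reallocate the object and fill in its
// fields from those inputs.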
SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt) {
  // Fields of scalar objs are referenced only at the end
  // of regular debuginfo at the last (youngest) JVMS.
  // Record relative start index.
  ciInstanceKlass* iklass = nullptr;
  BasicType basic_elem_type = T_ILLEGAL;
  const Type* field_type = nullptr;
  const TypeOopPtr* res_type = nullptr;
  int nfields = 0;
  int array_base = 0;
  int element_size = 0;
  uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
  Node* res = alloc->result_cast();

  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
  assert(sfpt->jvms() != nullptr, "missed JVMS");

  if (res != nullptr) { // Could be null when there are no users
    res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
      basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
      field_type = res_type->is_aryptr()->elem();
    }
  }

  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
  sobj->init_req(0, C->root());
  transform_later(sobj);

  // Scan object's fields adding an input to the safepoint for each field.
  for (int j = 0; j < nfields; j++) {
    intptr_t offset;
    ciField* field = nullptr;
    if (iklass != nullptr) {
      field = iklass->nonstatic_field_at(j);
      offset = field->offset_in_bytes();
      ciType* elem_type = field->type();
      basic_elem_type = field->layout_type();

      // The following code is taken from Parse::do_get_xxx().
      if (is_reference_type(basic_elem_type)) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != nullptr && field->is_static_constant()) {
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != nullptr, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }
    } else {
      offset = array_base + j * (intptr_t)element_size;
    }

    const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();

    Node *field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);

    // We weren't able to find a value for this field,
    // so give up on eliminating this allocation.
    if (field_val == nullptr) {
      uint last = sfpt->req() - 1;
      for (int k = 0; k < j; k++) {
        sfpt->del_req(last--);
      }
      _igvn._worklist.push(sfpt);

#ifndef PRODUCT
      if (PrintEliminateAllocations) {
        if (field != nullptr) {
          tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
          field->print();
          int field_idx = C->get_alias_index(field_addr_type);
          tty->print(" (alias_idx=%d)", field_idx);
        } else { // Array's element
          tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, j);
        }
        tty->print(", which prevents elimination of: ");
        if (res == nullptr)
          alloc->dump();
        else
          res->dump();
      }
#endif

      return nullptr;
    }

    if (UseCompressedOops && field_type->isa_narrowoop()) {
      // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
      // to be able to scalar-replace the allocation.
      if (field_val->is_EncodeP()) {
        field_val = field_val->in(1);
      } else {
        field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
      }
    }
    sfpt->add_req(field_val);
  }

  sfpt->jvms()->set_endoff(sfpt->req());

  return sobj;
}

// Do scalar replacement.
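// For each safepoint that can observe the object we swap the object
// reference in the debug info for a scalarized description, e.g.
//
//     SafePoint(..., p, ...)
//       ==> SafePoint(..., SafePointScalarObject(Point, 2 fields), x_val, y_val)
//
// If any safepoint cannot be described this way, all previously rewritten
// safepoints are rolled back and the allocation stays.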
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  GrowableArray <SafePointNode *> safepoints_done;
  Node* res = alloc->result_cast();
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  // Process the safepoint uses
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt);

    if (sobj == nullptr) {
      undo_previous_scalarizations(safepoints_done, alloc);
      return false;
    }

    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    JVMState *jvms = sfpt->jvms();
    sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
    _igvn._worklist.push(sfpt);

    // keep it for rollback
    safepoints_done.append_if_missing(sfpt);
  }

  return true;
}

static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
  Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
  Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
  if (ctl_proj != nullptr) {
    igvn.replace_node(ctl_proj, n->in(0));
  }
  if (mem_proj != nullptr) {
    igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
  }
}

// Process users of eliminated allocation.
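// Stores into a non-escaping object are dead once the object is gone: each
// Store below is bypassed by wiring its users to its input memory. AddP
// address computations, the ArrayCopy that initialized the object, and the
// Initialize projections are disconnected in the same way.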
void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != nullptr) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            // Verify that there are no dependent MemBarVolatile nodes;
            // they should have been removed during IGVN, see MemBarNode::Ideal().
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_gc_barrier(n);
          }
          k -= (oc2 - use->outcnt());
        }
        _igvn.remove_dead_node(use);
      } else if (use->is_ArrayCopy()) {
        // Disconnect ArrayCopy node
        ArrayCopyNode* ac = use->as_ArrayCopy();
        if (ac->is_clonebasic()) {
          Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
          disconnect_projections(ac, _igvn);
          assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
          Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
          disconnect_projections(membar_before->as_MemBar(), _igvn);
          if (membar_after->is_MemBar()) {
            disconnect_projections(membar_after->as_MemBar(), _igvn);
          }
        } else {
          assert(ac->is_arraycopy_validated() ||
                 ac->is_copyof_validated() ||
                 ac->is_copyofrange_validated(), "unsupported");
          CallProjections callprojs;
          ac->extract_projections(&callprojs, true);

          _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
          _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
          _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));

          // Set control to top. IGVN will remove the remaining projections
          ac->set_req(0, top());
          ac->replace_edge(res, top(), &_igvn);

          // Disconnect src right away: it can help find new
          // opportunities for allocation elimination
          Node* src = ac->in(ArrayCopyNode::Src);
          ac->replace_edge(src, top(), &_igvn);
          // src can be top at this point if src and dest of the
          // arraycopy were the same
          if (src->outcnt() == 0 && !src->is_top()) {
            _igvn.remove_dead_node(src);
          }
        }
        _igvn._worklist.push(ac);
      } else {
        eliminate_gc_barrier(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }

  //
  // Process other users of allocation's projections
  //
  if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
    // First disconnect stores captured by Initialize node.
    // If Initialize node is eliminated first in the following code,
    // it will kill such stores and DUIterator_Last will assert.
    for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax); j < jmax; j++) {
      Node* use = _callprojs.resproj->fast_out(j);
      if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.replace_node(use, C->top());
        --j; --jmax;
      }
    }
    for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
      Node* use = _callprojs.resproj->last_out(j);
      uint oc1 = _callprojs.resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode *init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
        if (ctrl_proj != nullptr) {
          _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
          // If the InitializeNode has no memory out, it will die, and tmp will become null
          Node* tmp = init->in(TypeFunc::Control);
          assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
#endif
        }
        Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
        if (mem_proj != nullptr) {
          Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _callprojs.fallthrough_memproj, "allocation memory projection");
          } else {
            assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _callprojs.resproj->outcnt());
    }
  }
  if (_callprojs.fallthrough_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
  }
  if (_callprojs.fallthrough_memproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
  }
  if (_callprojs.catchall_memproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_memproj, C->top());
  }
  if (_callprojs.fallthrough_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
  }
  if (_callprojs.catchall_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
  }
  if (_callprojs.catchall_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
  }
}

bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
  // If reallocation fails during deoptimization we'll pop all
  // interpreter frames for this compiled frame and that won't play
  // nice with JVMTI popframe.
  // We avoid this issue by eager reallocation when the popframe request
  // is received.
  if (!EliminateAllocations || !alloc->_is_non_escaping) {
    return false;
  }
  Node* klass = alloc->in(AllocateNode::KlassNode);
  const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
  Node* res = alloc->result_cast();
  // Eliminate boxing allocations which are not used,
  // regardless of their scalar replaceable status.
  bool boxing_alloc = C->eliminate_boxing() &&
                      tklass->isa_instklassptr() &&
                      tklass->is_instklassptr()->instance_klass()->is_box_klass();
  if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
    return false;
  }

  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
    return false;
  }

  if (!alloc->_is_scalar_replaceable) {
    assert(res == nullptr, "sanity");
    // We can only eliminate the allocation if all debug info references
    // are already replaced with SafePointScalarObject because
    // we can't search for a field's value without an instance_id.
    if (safepoints.length() > 0) {
      return false;
    }
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  CompileLog* log = C->log();
  if (log != nullptr) {
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->exact_klass()));
    JVMState* p = alloc->jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}

bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
  // EA should remove all uses of a non-escaping boxing node.
  if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
    return false;
  }

  assert(boxing->result_cast() == nullptr, "unexpected boxing node result");

  boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  const TypeTuple* r = boxing->tf()->range();
  assert(r->cnt() > TypeFunc::Parms, "sanity");
  const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
  assert(t != nullptr, "sanity");

  CompileLog* log = C->log();
  if (log != nullptr) {
    log->head("eliminate_boxing type='%d'",
              log->identify(t->instance_klass()));
    JVMState* p = boxing->jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_boxing");
  }

  process_users_of_allocation(boxing);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
      }
    }
#endif
      yank_alloc_node(alloc);
      return;
    }
  }

  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = nullptr;
  Node *toobig_false = ctrl;

  // generate the initial test if necessary
  if (initial_slow_test != nullptr) {
    assert(expand_fast_path, "Only need test if there is a fast path");
    slow_region = new RegionNode(3);

    // Now make the initial failure test. Usually a too-big test but
    // might be a TRUE for finalizers or a fancy class check for
    // newInstance0.
    IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    // Plug the failing-too-big test into the slow-path region
    Node *toobig_true = new IfTrueNode(toobig_iff);
    transform_later(toobig_true);
    slow_region->init_req(too_big_or_final_path, toobig_true);
    toobig_false = new IfFalseNode(toobig_iff);
    transform_later(toobig_false);
  } else {
    // No initial test, just fall into next case
    assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }

  // If we are here, there are several possibilities:
  // - expand_fast_path is false - then only a slow path is expanded. That's it.
  //   no_initial_check means a constant allocation.
  // - If the check always evaluates to false -> expand_fast_path is false (see above).
  // - If the check always evaluates to true -> directly into the fast path
  //   (but may bail out to the slow path).
  // if !allocation_has_use the fast path is empty
  // if !allocation_has_use && no_initial_check
  //   - Then there is no fast path that can fall out to the slow path -> no
  //     allocation code at all; it was removed by yank_alloc_node above.

  Node *slow_mem = mem;  // save the current memory state for slow path
  // generate the fast allocation code unless we know that the initial test will always go slow
  if (expand_fast_path) {
    // Fast path modifies only raw memory.
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    }

    // allocate the Region and Phi nodes for the result
    result_region = new RegionNode(3);
    result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
    result_phi_i_o = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch

    // Grab regular I/O before optional prefetch may change it.
    // Slow-path does no I/O so just set it to the original I/O.
    result_phi_i_o->init_req(slow_result_path, i_o);

    // Name successful fast-path variables
    Node* fast_oop_ctrl;
    Node* fast_oop_rawmem;
    if (allocation_has_use) {
      Node* needgc_ctrl = nullptr;
      result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);

      intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
                                        fast_oop_ctrl, fast_oop_rawmem,
                                        prefetch_lines);

      if (initial_slow_test != nullptr) {
        // This completes all paths into the slow merge point
        slow_region->init_req(need_gc_path, needgc_ctrl);
        transform_later(slow_region);
      } else {
        // No initial slow path needed!
        // Just fall from the need-GC path straight into the VM call.
        slow_region = needgc_ctrl;
      }

      InitializeNode* init = alloc->initialization();
      fast_oop_rawmem = initialize_object(alloc,
                                          fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                          klass_node, length, size_in_bytes);
      expand_initialize_membar(alloc, init, fast_oop_ctrl, fast_oop_rawmem);
      expand_dtrace_alloc_probe(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem);

      result_phi_rawoop->init_req(fast_result_path, fast_oop);
    } else {
      assert(initial_slow_test == nullptr, "only needed in a certain case");
      fast_oop_ctrl = toobig_false;
      fast_oop_rawmem = mem;
      transform_later(fast_oop_ctrl);
    }

    // Plug in the successful fast-path into the result merge point
    result_region->init_req(fast_result_path, fast_oop_ctrl);
    result_phi_i_o->init_req(fast_result_path, i_o);
    result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
  } else {
    slow_region = ctrl;
    result_phi_i_o = i_o; // Rename it to use in the following code.
  }

  // Generate slow-path call
  CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
                                          OptoRuntime::stub_name(slow_call_address),
                                          TypePtr::BOTTOM);
  call->init_req(TypeFunc::Control, slow_region);
  call->init_req(TypeFunc::I_O, top());       // does no i/o
  call->init_req(TypeFunc::Memory, slow_mem); // may gc ptrs
  call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
  call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));

  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != nullptr) {
    call->init_req(TypeFunc::Parms+1, length);
  }

  // Copy debug information and adjust JVMState information, then replace
  // allocate node with the call
  call->copy_call_debug_info(&_igvn, alloc);
  // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
  // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
  // path dies).
  if (valid_length_test != nullptr) {
    call->add_req(valid_length_test);
  }
  if (expand_fast_path) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  } else {
    // Hook i_o projection to avoid its elimination during allocation
    // replacement (when only a slow call is generated).
    call->set_req(TypeFunc::I_O, result_phi_i_o);
  }
  _igvn.replace_node(alloc, call);
  transform_later(call);

  // Identify the output projections from the allocate node and
  // adjust any references to them.
  // The control and io projections look like:
  //
  //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
  //  Allocate                 Catch
  //        ^---Proj(io)    <----+   ^---CatchProj(io)
  //
  // We are interested in the CatchProj nodes.
  //
  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  // An allocate node has separate memory projections for the uses on
  // the control and i_o paths. Replace the control memory projection with
  // result_phi_rawmem (unless we are only generating a slow call when
  // both memory projections are combined)
  if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
  }
  // Now change uses of catchall_memproj to use fallthrough_memproj and delete
  // catchall_memproj so we end up with a call that has only 1 memory projection.
  if (_callprojs.catchall_memproj != nullptr) {
    if (_callprojs.fallthrough_memproj == nullptr) {
      _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
      transform_later(_callprojs.fallthrough_memproj);
    }
    migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
    _igvn.remove_dead_node(_callprojs.catchall_memproj);
  }

  // An allocate node has separate i_o projections for the uses on the control
  // and i_o paths. Always replace the control i_o projection with result i_o,
  // otherwise the incoming i_o becomes dead when only a slow call is generated
  // (it is different from memory projections where both projections are
  // combined in such case).
  if (_callprojs.fallthrough_ioproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
  }
  // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
  // catchall_ioproj so we end up with a call that has only 1 i_o projection.
  if (_callprojs.catchall_ioproj != nullptr) {
    if (_callprojs.fallthrough_ioproj == nullptr) {
      _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
      transform_later(_callprojs.fallthrough_ioproj);
    }
    migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
    _igvn.remove_dead_node(_callprojs.catchall_ioproj);
  }

  // if we generated only a slow call, we are done
  if (!expand_fast_path) {
    // Now we can unhook i_o.
    if (result_phi_i_o->outcnt() > 1) {
      call->set_req(TypeFunc::I_O, top());
    } else {
      assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
      // This is the case of a new array with a negative size known at compile
      // time. The AllocateArrayNode::Ideal() optimization disconnected the
      // unreachable code that follows, since the runtime call will throw an
      // exception. As a result there are no users of i_o after the call.
      // Leave i_o attached to this call to avoid problems in the preceding graph.
    }
    return;
  }

  if (_callprojs.fallthrough_catchproj != nullptr) {
    ctrl = _callprojs.fallthrough_catchproj->clone();
    transform_later(ctrl);
    _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
  } else {
    ctrl = top();
  }
  Node *slow_result;
  if (_callprojs.resproj == nullptr) {
    // no uses of the allocation result
    slow_result = top();
  } else {
    slow_result = _callprojs.resproj->clone();
    transform_later(slow_result);
    _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
  }

  // Plug slow-path into result merge point
  result_region->init_req(slow_result_path, ctrl);
  transform_later(result_region);
  if (allocation_has_use) {
    result_phi_rawoop->init_req(slow_result_path, slow_result);
    transform_later(result_phi_rawoop);
  }
  result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
  // This completes all paths into the result merge point
}

// Remove alloc node that has no uses.
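// This happens, e.g., when every use of the allocation result was optimized
// away: only the control, memory and i_o projections remain and are rerouted
// to the allocation's corresponding inputs below.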
void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);

  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
  if (_callprojs.resproj != nullptr) {
    for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
      Node* use = _callprojs.resproj->fast_out(i);
      use->isa_MemBar()->remove(&_igvn);
      --imax;
      --i; // back up iterator
    }
    assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
    _igvn.remove_dead_node(_callprojs.resproj);
  }
  if (_callprojs.fallthrough_catchproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
    _igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
  }
  if (_callprojs.catchall_catchproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
    _callprojs.catchall_catchproj->set_req(0, top());
  }
  if (_callprojs.fallthrough_proj != nullptr) {
    Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
    _igvn.remove_dead_node(catchnode);
    _igvn.remove_dead_node(_callprojs.fallthrough_proj);
  }
  if (_callprojs.fallthrough_memproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_memproj, mem);
    _igvn.remove_dead_node(_callprojs.fallthrough_memproj);
  }
  if (_callprojs.fallthrough_ioproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_ioproj, i_o);
    _igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
  }
  if (_callprojs.catchall_memproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
    _callprojs.catchall_memproj->set_req(0, top());
  }
  if (_callprojs.catchall_ioproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
    _callprojs.catchall_ioproj->set_req(0, top());
  }
#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray()) {
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    } else {
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
    }
  }
#endif
  _igvn.remove_dead_node(alloc);
}

void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
                                                Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
  // If initialization is performed by an array copy, any required
  // MemBarStoreStore was already added. If the object does not
  // escape no need for a MemBarStoreStore. If the object does not
  // escape in its initializer and memory barrier (MemBarStoreStore or
  // stronger) is already added at exit of initializer, also no need
    Node* thread = new ThreadLocalNode();
    transform_later(thread);

    call->init_req(TypeFunc::Parms + 0, thread);
    call->init_req(TypeFunc::Parms + 1, oop);
    call->init_req(TypeFunc::Control, ctrl);
    call->init_req(TypeFunc::I_O    , top()); // does no i/o
    call->init_req(TypeFunc::Memory , rawmem);
    call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
    call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
    transform_later(call);
    ctrl = new ProjNode(call, TypeFunc::Control);
    transform_later(ctrl);
    rawmem = new ProjNode(call, TypeFunc::Memory);
    transform_later(rawmem);
  }
}

// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
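// Schematically, the emitted initialization is
//
//     store(object + oopDesc::mark_offset_in_bytes(),  mark word)
//     store(object + oopDesc::klass_offset_in_bytes(), klass)
//     store(object + length_offset, length)            // arrays only
//     clear the payload from header_size to size_in_bytes, unless the
//     Initialize node's captured stores already cover it (or TLAB memory
//     is pre-zeroed with ZeroTLAB).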
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
  InitializeNode* init = alloc->initialization();
  // Store the klass & mark bits
  Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
  if (!mark_node->is_Con()) {
    transform_later(mark_node);
  }
  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());

  rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
  int header_size = alloc->minimum_header_size();  // conservatively small

  // Array length
  if (length != nullptr) {  // Arrays need length field
    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
    // conservatively small header size:
    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    if (_igvn.type(klass_node)->isa_aryklassptr()) {  // we know the exact header size in most cases:
      BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
      if (is_reference_type(elem, true)) {
        elem = T_OBJECT;
      }
      header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
    }
  }

  // Clear the object body, if necessary.
  if (init == nullptr) {
    // The init has somehow disappeared; be cautious and clear everything.
    //
    // This can happen if a node is allocated but an uncommon trap occurs
    // immediately. In this case, the Initialize gets associated with the
    // trap, and may be placed in a different (outer) loop, if the Allocate
    // is in a loop. If (this is rare) the inner loop gets unrolled, then
    // there can be two Allocates to one Initialize. The answer in all these
    // edge cases is safety first. It is always safe to clear immediately
    // within an Allocate, and then (maybe or maybe not) clear some more later.
    if (!(UseTLAB && ZeroTLAB)) {
      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                            header_size, size_in_bytes,
                                            &_igvn);
    }
  } else {
    if (!init->is_complete()) {
      // Try to win by zeroing only what the init does not store.
      // We can also try to do some peephole optimizations,
      // such as combining some adjacent subword stores.
      rawmem = init->complete_stores(control, rawmem, object,
                                     header_size, size_in_bytes, &_igvn);
    }
    // We have no more use for this link, since the AllocateNode goes away:
    init->set_req(InitializeNode::RawAddress, top());
    // (If we keep the link, it just confuses the register allocator,
    // which would think it sees a real use of the address by the membar.)
  }

  return rawmem;
}
1753
2114 #ifdef ASSERT
2115 if (!alock->is_coarsened()) {
2116 // Check that new "eliminated" BoxLock node is created.
2117 BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2118 assert(oldbox->is_eliminated(), "should be done already");
2119 }
2120 #endif
2121
2122 alock->log_lock_optimization(C, "eliminate_lock");
2123
2124 #ifndef PRODUCT
2125 if (PrintEliminateLocks) {
2126 tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
2127 }
2128 #endif
2129
2130 Node* mem = alock->in(TypeFunc::Memory);
2131 Node* ctrl = alock->in(TypeFunc::Control);
2132 guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
2133
2134 alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2135 // There are 2 projections from the lock. The lock node will
2136 // be deleted when its last use is subsumed below.
2137 assert(alock->outcnt() == 2 &&
2138 _callprojs.fallthrough_proj != nullptr &&
2139 _callprojs.fallthrough_memproj != nullptr,
2140 "Unexpected projections from Lock/Unlock");
2141
2142 Node* fallthroughproj = _callprojs.fallthrough_proj;
2143 Node* memproj_fallthrough = _callprojs.fallthrough_memproj;
2144
2145 // The memory projection from a lock/unlock is RawMem
2146 // The input to a Lock is merged memory, so extract its RawMem input
2147 // (unless the MergeMem has been optimized away.)
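// Sketch of the rewiring done below (illustrative): before elimination the
// shape is
//   Lock -> fallthrough_proj -> MemBarAcquireLock -> {ctrl proj, mem proj}
// and afterwards the membar's control and memory users are wired directly
// to fallthrough_proj / memproj_fallthrough, so both nodes can go away.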
2148 if (alock->is_Lock()) {
2149 // Search for MemBarAcquireLock node and delete it also.
2150 MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
2151 assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
2152 Node* ctrlproj = membar->proj_out(TypeFunc::Control);
2153 Node* memproj = membar->proj_out(TypeFunc::Memory);
2154 _igvn.replace_node(ctrlproj, fallthroughproj);
2155 _igvn.replace_node(memproj, memproj_fallthrough);
2156
2157     // Also delete the FastLock node if this Lock node is its unique user
2158     // (loop peeling may clone a Lock node).
2159 Node* flock = alock->as_Lock()->fastlock_node();
2160 if (flock->outcnt() == 1) {
2161 assert(flock->unique_out() == alock, "sanity");
2162 _igvn.replace_node(flock, top());
2163 }
2194 assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2195
2196 // Make the merge point
2197 Node *region;
2198 Node *mem_phi;
2199 Node *slow_path;
2200
2201 region = new RegionNode(3);
2202 // create a Phi for the memory state
2203 mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2204
2205 // Optimize test; set region slot 2
2206 slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
2207 mem_phi->init_req(2, mem);
2208
2209 // Make slow path call
2210 CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
2211 OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
2212 obj, box, nullptr);
2213
2214 call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2215
2216 // Slow path can only throw asynchronous exceptions, which are always
2217 // de-opted. So the compiler thinks the slow-call can never throw an
2218 // exception. If it DOES throw an exception we would need the debug
2219 // info removed first (since if it throws there is no monitor).
2220 assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2221 _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2222
2223 // Capture slow path
2224 // disconnect fall-through projection from call and create a new one
2225 // hook up users of fall-through projection to region
2226 Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2227 transform_later(slow_ctrl);
2228 _igvn.hash_delete(_callprojs.fallthrough_proj);
2229 _callprojs.fallthrough_proj->disconnect_inputs(C);
2230 region->init_req(1, slow_ctrl);
2231 // region inputs are now complete
2232 transform_later(region);
2233 _igvn.replace_node(_callprojs.fallthrough_proj, region);
2234
2235 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2236
2237 mem_phi->init_req(1, memproj);
2238
2239 transform_later(mem_phi);
2240
2241 _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2242 }
2243
2244 //------------------------------expand_unlock_node----------------------
2245 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
2246
2247 Node* ctrl = unlock->in(TypeFunc::Control);
2248 Node* mem = unlock->in(TypeFunc::Memory);
2249 Node* obj = unlock->obj_node();
2250 Node* box = unlock->box_node();
2251
2252 assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2253
2254 // No need for a null check on unlock
2255
2256 // Make the merge point
2257 Node *region;
2258 Node *mem_phi;
2259
2260 region = new RegionNode(3);
2261 // create a Phi for the memory state
2262 mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2263
2264 FastUnlockNode *funlock = new FastUnlockNode( ctrl, obj, box );
2265 funlock = transform_later( funlock )->as_FastUnlock();
2266 // Optimize test; set region slot 2
2267 Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
2268 Node *thread = transform_later(new ThreadLocalNode());
2269
2270 CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
2271 CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2272 "complete_monitor_unlocking_C", slow_path, obj, box, thread);
2273
2274 call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2275 assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2276 _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2277
2278 // No exceptions for unlocking
2279 // Capture slow path
2280 // disconnect fall-through projection from call and create a new one
2281 // hook up users of fall-through projection to region
2282 Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2283 transform_later(slow_ctrl);
2284 _igvn.hash_delete(_callprojs.fallthrough_proj);
2285 _callprojs.fallthrough_proj->disconnect_inputs(C);
2286 region->init_req(1, slow_ctrl);
2287 // region inputs are now complete
2288 transform_later(region);
2289 _igvn.replace_node(_callprojs.fallthrough_proj, region);
2290
2291 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2292 mem_phi->init_req(1, memproj );
2293 mem_phi->init_req(2, mem);
2294 transform_later(mem_phi);
2295
2296 _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2297 }
2298
2299 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
2300 assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
2301 Node* bol = check->unique_out();
2302 Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
2303 Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
2304 assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
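// Each If hanging off this Bool is rewritten below: the full subtype check
// is emitted via Phase::gen_subtype_check(), the true projection (test was
// "ne", i.e. not a subtype) is replaced by the not-subtype control, and the
// false projection by the remaining (subtype) control.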
2305
2306 for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
2307 Node* iff = bol->last_out(i);
2308 assert(iff->is_If(), "where's the if?");
2309
2310 if (iff->in(0)->is_top()) {
2311 _igvn.replace_input_of(iff, 1, C->top());
2312 continue;
2313 }
2314
2315 Node* iftrue = iff->as_If()->proj_out(1);
2316 Node* iffalse = iff->as_If()->proj_out(0);
2317 Node* ctrl = iff->in(0);
2318
2319 Node* subklass = nullptr;
2320 if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
2321 subklass = obj_or_subklass;
2322 } else {
2323 Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
2324 subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
2325 }
2326
2327 Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
2328
2329 _igvn.replace_input_of(iff, 0, C->top());
2330 _igvn.replace_node(iftrue, not_subtype_ctrl);
2331 _igvn.replace_node(iffalse, ctrl);
2332 }
2333 _igvn.replace_node(check, C->top());
2334 }
2335
2336 //---------------------------eliminate_macro_nodes----------------------
2337 // Eliminate scalar replaced allocations and associated locks.
2338 void PhaseMacroExpand::eliminate_macro_nodes() {
2339 if (C->macro_count() == 0)
2340 return;
2341 NOT_PRODUCT(int membar_before = count_MemBar(C);)
2342
2343   // Before elimination, we may re-mark (change to Nested or NonEscObj)
2344   // all associated (same box and obj) lock and unlock nodes.
2345 int cnt = C->macro_count();
2346 for (int i=0; i < cnt; i++) {
2347 Node *n = C->macro_node(i);
2348 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2349 mark_eliminated_locking_nodes(n->as_AbstractLock());
2350 }
2351 }
2352 // Re-marking may break consistency of Coarsened locks.
2353 if (!C->coarsened_locks_consistent()) {
2354 return; // recompile without Coarsened locks if broken
2355 }
2376 }
2377 // Next, attempt to eliminate allocations
2378 _has_locks = false;
2379 progress = true;
2380 while (progress) {
2381 progress = false;
2382 for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
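// Re-clamping with MIN2 keeps the index valid because one successful
// elimination (e.g. an Allocate together with its locks) can remove
// several macro nodes in a single step.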
2383 Node* n = C->macro_node(i - 1);
2384 bool success = false;
2385 DEBUG_ONLY(int old_macro_count = C->macro_count();)
2386 switch (n->class_id()) {
2387 case Node::Class_Allocate:
2388 case Node::Class_AllocateArray:
2389 success = eliminate_allocate_node(n->as_Allocate());
2390 #ifndef PRODUCT
2391 if (success && PrintOptoStatistics) {
2392 Atomic::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
2393 }
2394 #endif
2395 break;
2396 case Node::Class_CallStaticJava:
2397 success = eliminate_boxing_node(n->as_CallStaticJava());
2398 break;
2399 case Node::Class_Lock:
2400 case Node::Class_Unlock:
2401 assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2402 _has_locks = true;
2403 break;
2404 case Node::Class_ArrayCopy:
2405 break;
2406 case Node::Class_OuterStripMinedLoop:
2407 break;
2408 case Node::Class_SubTypeCheck:
2409 break;
2410 case Node::Class_Opaque1:
2411 break;
2412 default:
2413 assert(n->Opcode() == Op_LoopLimit ||
2414 n->Opcode() == Op_Opaque3 ||
2415 n->Opcode() == Op_Opaque4 ||
2416 n->Opcode() == Op_MaxL ||
2417 n->Opcode() == Op_MinL ||
2418 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2419 "unknown node type in macro list");
2420 }
2421 assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2422 progress = progress || success;
2423 }
2424 }
2425 #ifndef PRODUCT
2426 if (PrintOptoStatistics) {
2427 int membar_after = count_MemBar(C);
2428 Atomic::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
2429 }
2430 #endif
2431 }
2437 C->shuffle_macro_nodes();
2438 }
2439 // Last attempt to eliminate macro nodes.
2440 eliminate_macro_nodes();
2441 if (C->failing()) return true;
2442
2443 // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2444 bool progress = true;
2445 while (progress) {
2446 progress = false;
2447 for (int i = C->macro_count(); i > 0; i--) {
2448 Node* n = C->macro_node(i-1);
2449 bool success = false;
2450 DEBUG_ONLY(int old_macro_count = C->macro_count();)
2451 if (n->Opcode() == Op_LoopLimit) {
2452 // Remove it from macro list and put on IGVN worklist to optimize.
2453 C->remove_macro_node(n);
2454 _igvn._worklist.push(n);
2455 success = true;
2456 } else if (n->Opcode() == Op_CallStaticJava) {
2457 // Remove it from macro list and put on IGVN worklist to optimize.
2458 C->remove_macro_node(n);
2459 _igvn._worklist.push(n);
2460 success = true;
2461 } else if (n->is_Opaque1()) {
2462 _igvn.replace_node(n, n->in(1));
2463 success = true;
2464 #if INCLUDE_RTM_OPT
2465 } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
2466 assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
2467 assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
2468 Node* cmp = n->unique_out();
2469 #ifdef ASSERT
2470 // Validate graph.
2471 assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
2472 BoolNode* bol = cmp->unique_out()->as_Bool();
2473 assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
2474 (bol->_test._test == BoolTest::ne), "");
2475 IfNode* ifn = bol->unique_out()->as_If();
2476 assert((ifn->outcnt() == 2) &&
2477 ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != nullptr, "");
2478 #endif
2479 Node* repl = n->in(1);
2480 if (!_has_locks) {
2558 // Worst case is a macro node gets expanded into about 200 nodes.
2559 // Allow 50% more for optimization.
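// (200 nodes * 1.5 == the 300-node budget requested below.)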
2560 if (C->check_node_count(300, "out of nodes before macro expansion")) {
2561 return true;
2562 }
2563
2564 DEBUG_ONLY(int old_macro_count = C->macro_count();)
2565 switch (n->class_id()) {
2566 case Node::Class_Lock:
2567 expand_lock_node(n->as_Lock());
2568 break;
2569 case Node::Class_Unlock:
2570 expand_unlock_node(n->as_Unlock());
2571 break;
2572 case Node::Class_ArrayCopy:
2573 expand_arraycopy_node(n->as_ArrayCopy());
2574 break;
2575 case Node::Class_SubTypeCheck:
2576 expand_subtypecheck_node(n->as_SubTypeCheck());
2577 break;
2578 default:
2579 assert(false, "unknown node type in macro list");
2580 }
2581 assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
2582 if (C->failing()) return true;
2583 C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
2584
2585 // Clean up the graph so we're less likely to hit the maximum node
2586 // limit
2587 _igvn.set_delay_transform(false);
2588 _igvn.optimize();
2589 if (C->failing()) return true;
2590 _igvn.set_delay_transform(true);
2591 }
2592
2593 // All nodes except Allocate nodes are expanded now. There could be
2594 // new optimization opportunities (such as folding newly created
2595 // load from a just allocated object). Run IGVN.
2596
2597 // expand "macro" nodes
101 Node *fast_taken = transform_later(new IfFalseNode(iff));
102
103 // Fast path not-taken, i.e. slow path
104 Node *slow_taken = transform_later(new IfTrueNode(iff));
105
106 if (return_fast_path) {
107 region->init_req(edge, slow_taken); // Capture slow-control
130 // Slow-path call
131 CallNode *call = leaf_name
132 ? (CallNode*)new CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
133 : (CallNode*)new CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM );
134
135 // Slow path call has no side-effects, uses few values
136 copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
137 if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
138 if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
139 if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
140 call->copy_call_debug_info(&_igvn, oldcall);
141 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
142 _igvn.replace_node(oldcall, call);
143 transform_later(call);
144
145 return call;
146 }
147
148 void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
149 BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
150 bs->eliminate_gc_barrier(&_igvn, p2x);
151 #ifndef PRODUCT
152 if (PrintOptoStatistics) {
153 Atomic::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
154 }
155 #endif
156 }
157
158 // Search for a memory operation for the specified memory slice.
159 static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
160 Node *orig_mem = mem;
161 Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
162 assert(alloc_mem != nullptr, "Allocation without a memory projection.");
163 const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
164 while (true) {
165 if (mem == alloc_mem || mem == start_mem ) {
166 return mem; // hit one of our sentinels
167 } else if (mem->is_MergeMem()) {
168 mem = mem->as_MergeMem()->memory_at(alias_idx);
169 } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
170 Node *in = mem->in(0);
185 ArrayCopyNode* ac = nullptr;
186 if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
187 if (ac != nullptr) {
188 assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
189 return ac;
190 }
191 }
192 mem = in->in(TypeFunc::Memory);
193 } else {
194 #ifdef ASSERT
195 in->dump();
196 mem->dump();
197 assert(false, "unexpected projection");
198 #endif
199 }
200 } else if (mem->is_Store()) {
201 const TypePtr* atype = mem->as_Store()->adr_type();
202 int adr_idx = phase->C->get_alias_index(atype);
203 if (adr_idx == alias_idx) {
204 assert(atype->isa_oopptr(), "address type must be oopptr");
205 int adr_offset = atype->flat_offset();
206 uint adr_iid = atype->is_oopptr()->instance_id();
207 // Array elements references have the same alias_idx
208 // but different offset and different instance_id.
209 if (adr_offset == offset && adr_iid == alloc->_idx) {
210 return mem;
211 }
212 } else {
213 assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
214 }
215 mem = mem->in(MemNode::Memory);
216 } else if (mem->is_ClearArray()) {
217 if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
218       // Cannot bypass the initialization of the instance
219       // we are looking for.
220 debug_only(intptr_t offset;)
221 assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
222 InitializeNode* init = alloc->as_Allocate()->initialization();
223 // We are looking for stored value, return Initialize node
224 // or memory edge from Allocate node.
225 if (init != nullptr) {
230 }
231 // Otherwise skip it (the call updated 'mem' value).
232 } else if (mem->Opcode() == Op_SCMemProj) {
233 mem = mem->in(0);
234 Node* adr = nullptr;
235 if (mem->is_LoadStore()) {
236 adr = mem->in(MemNode::Address);
237 } else {
238 assert(mem->Opcode() == Op_EncodeISOArray ||
239 mem->Opcode() == Op_StrCompressedCopy, "sanity");
240 adr = mem->in(3); // Destination array
241 }
242 const TypePtr* atype = adr->bottom_type()->is_ptr();
243 int adr_idx = phase->C->get_alias_index(atype);
244 if (adr_idx == alias_idx) {
245 DEBUG_ONLY(mem->dump();)
246 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
247 return nullptr;
248 }
249 mem = mem->in(MemNode::Memory);
250 } else if (mem->Opcode() == Op_StrInflatedCopy) {
251 Node* adr = mem->in(3); // Destination array
252 const TypePtr* atype = adr->bottom_type()->is_ptr();
253 int adr_idx = phase->C->get_alias_index(atype);
254 if (adr_idx == alias_idx) {
255 DEBUG_ONLY(mem->dump();)
256 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
257 return nullptr;
258 }
259 mem = mem->in(MemNode::Memory);
260 } else {
261 return mem;
262 }
263 assert(mem != orig_mem, "dead memory loop");
264 }
265 }
266
267 // Generate loads from the source of the arraycopy for fields of the
268 // destination that are needed at a deoptimization point.
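// Worked example (illustrative constants, an assumption): for an int copy
// (shift == 2) with constant src_pos == 4, dest_pos == 1 and a destination
// field at offset == 20, the code below computes
//   off = ((4 - 1) << 2) + 20 = 32
// and reloads the destination's value from the source at offset 32.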
269 Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) {
270 BasicType bt = ft;
275 }
276 Node* res = nullptr;
277 if (ac->is_clonebasic()) {
278 assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
279 Node* base = ac->in(ArrayCopyNode::Src);
280 Node* adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(offset)));
281 const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
282 MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
283 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
284 res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
285 } else {
286 if (ac->modifies(offset, offset, &_igvn, true)) {
287 assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
288 uint shift = exact_log2(type2aelembytes(bt));
289 Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
290 Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
291 const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
292 const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
293
294 Node* adr = nullptr;
295 Node* base = ac->in(ArrayCopyNode::Src);
296 const TypeAryPtr* adr_type = _igvn.type(base)->is_aryptr();
297 if (adr_type->is_flat()) {
298 shift = adr_type->flat_log_elem_size();
299 }
300 if (src_pos_t->is_con() && dest_pos_t->is_con()) {
301 intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
302 adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(off)));
303 adr_type = _igvn.type(adr)->is_aryptr();
304 assert(adr_type == _igvn.type(base)->is_aryptr()->add_field_offset_and_offset(off), "incorrect address type");
305 if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
306 // Don't emit a new load from src if src == dst but try to get the value from memory instead
307 return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type, alloc);
308 }
309 } else {
310 if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
311         // Non-constant offset in the array: we can't statically
312         // determine the value.
313 return nullptr;
314 }
315 Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
316 #ifdef _LP64
317 diff = _igvn.transform(new ConvI2LNode(diff));
318 #endif
319 diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));
320
321 Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
322 adr = _igvn.transform(new AddPNode(base, base, off));
323 // In the case of a flat inline type array, each field has its
324 // own slice so we need to extract the field being accessed from
325 // the address computation
326 adr_type = adr_type->add_field_offset_and_offset(offset)->add_offset(Type::OffsetBot)->is_aryptr();
327 adr = _igvn.transform(new CastPPNode(ctl, adr, adr_type));
328 }
329 MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
330 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
331 res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
332 }
333 }
334 if (res != nullptr) {
335 if (ftype->isa_narrowoop()) {
336 // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
337 assert(res->isa_DecodeN(), "should be narrow oop");
338 res = _igvn.transform(new EncodePNode(res, ftype));
339 }
340 return res;
341 }
342 return nullptr;
343 }
344
345 //
346 // Given a Memory Phi, compute a value Phi containing the values from stores
347 // on the input paths.
348 // Note: this function is recursive; its depth is limited by the "level" argument.
349 // Returns the computed Phi, or nullptr if it cannot compute it.
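// Illustrative shape (an assumption): given
//   memPhi(Region, mem-after-store1, mem-after-store2)
// this builds
//   valuePhi(Region, value-of-store1, value-of-store2)
// reusing an existing phi for the same instance field when one is found.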
350 Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
351 assert(mem->is_Phi(), "sanity");
352 int alias_idx = C->get_alias_index(adr_t);
353 int offset = adr_t->flat_offset();
354 int instance_id = adr_t->instance_id();
355
356 // Check if an appropriate value phi already exists.
357 Node* region = mem->in(0);
358 for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
359 Node* phi = region->fast_out(k);
360 if (phi->is_Phi() && phi != mem &&
361 phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
362 return phi;
363 }
364 }
365 // Check if an appropriate new value phi already exists.
366 Node* new_phi = value_phis->find(mem->_idx);
367 if (new_phi != nullptr)
368 return new_phi;
369
370 if (level <= 0) {
371 return nullptr; // Give up: phi tree too deep
372 }
373 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
374 Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
375 assert(alloc_mem != nullptr, "Allocation without a memory projection.");
376
377 uint length = mem->req();
378 GrowableArray <Node *> values(length, length, nullptr);
379
380 // create a new Phi for the value
381 PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
382 transform_later(phi);
383 value_phis->push(phi, mem->_idx);
384
385 for (uint j = 1; j < length; j++) {
386 Node *in = mem->in(j);
387 if (in == nullptr || in->is_top()) {
388 values.at_put(j, in);
389 } else {
390 Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
391 if (val == start_mem || val == alloc_mem) {
392 // hit a sentinel, return appropriate 0 value
393 Node* default_value = alloc->in(AllocateNode::DefaultValue);
394 if (default_value != nullptr) {
395 values.at_put(j, default_value);
396 } else {
397 assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
398 values.at_put(j, _igvn.zerocon(ft));
399 }
400 continue;
401 }
402 if (val->is_Initialize()) {
403 val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
404 }
405 if (val == nullptr) {
406 return nullptr; // can't find a value on this path
407 }
408 if (val == mem) {
409 values.at_put(j, mem);
410 } else if (val->is_Store()) {
411 Node* n = val->in(MemNode::ValueIn);
412 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
413 n = bs->step_over_gc_barrier(n);
414 if (is_subword_type(ft)) {
415 n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
416 }
417 values.at_put(j, n);
418 } else if(val->is_Proj() && val->in(0) == alloc) {
419 Node* default_value = alloc->in(AllocateNode::DefaultValue);
420 if (default_value != nullptr) {
421 values.at_put(j, default_value);
422 } else {
423 assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
424 values.at_put(j, _igvn.zerocon(ft));
425 }
426 } else if (val->is_Phi()) {
427 val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
428 if (val == nullptr) {
429 return nullptr;
430 }
431 values.at_put(j, val);
432 } else if (val->Opcode() == Op_SCMemProj) {
433 assert(val->in(0)->is_LoadStore() ||
434 val->in(0)->Opcode() == Op_EncodeISOArray ||
435 val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
436 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
437 return nullptr;
438 } else if (val->is_ArrayCopy()) {
439 Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
440 if (res == nullptr) {
441 return nullptr;
442 }
443 values.at_put(j, res);
444 } else {
445 DEBUG_ONLY( val->dump(); )
449 }
450 }
451 // Set Phi's inputs
452 for (uint j = 1; j < length; j++) {
453 if (values.at(j) == mem) {
454 phi->init_req(j, phi);
455 } else {
456 phi->init_req(j, values.at(j));
457 }
458 }
459 return phi;
460 }
461
462 // Search the last value stored into the object's field.
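// The loop below walks the memory chain from the safepoint's memory state
// back towards the allocation, stopping at a Store to the field, an
// Initialize's captured store, a Phi without a unique relevant input, an
// ArrayCopy, or one of the two sentinels (the start memory or the
// allocation's memory projection).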
463 Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
464 assert(adr_t->is_known_instance_field(), "instance required");
465 int instance_id = adr_t->instance_id();
466 assert((uint)instance_id == alloc->_idx, "wrong allocation");
467
468 int alias_idx = C->get_alias_index(adr_t);
469 int offset = adr_t->flat_offset();
470 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
471 Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
472 assert(alloc_mem != nullptr, "Allocation without a memory projection.");
473 VectorSet visited;
474
475 bool done = sfpt_mem == alloc_mem;
476 Node *mem = sfpt_mem;
477 while (!done) {
478 if (visited.test_set(mem->_idx)) {
479 return nullptr; // found a loop, give up
480 }
481 mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
482 if (mem == start_mem || mem == alloc_mem) {
483 done = true; // hit a sentinel, return appropriate 0 value
484 } else if (mem->is_Initialize()) {
485 mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
486 if (mem == nullptr) {
487 done = true; // Something went wrong.
488 } else if (mem->is_Store()) {
489 const TypePtr* atype = mem->as_Store()->adr_type();
490 assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
491 done = true;
492 }
493 } else if (mem->is_Store()) {
494 const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
495 assert(atype != nullptr, "address type must be oopptr");
496 assert(C->get_alias_index(atype) == alias_idx &&
497 atype->is_known_instance_field() && atype->flat_offset() == offset &&
498 atype->instance_id() == instance_id, "store is correct memory slice");
499 done = true;
500 } else if (mem->is_Phi()) {
501 // try to find a phi's unique input
502 Node *unique_input = nullptr;
503 Node *top = C->top();
504 for (uint i = 1; i < mem->req(); i++) {
505 Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
506 if (n == nullptr || n == top || n == mem) {
507 continue;
508 } else if (unique_input == nullptr) {
509 unique_input = n;
510 } else if (unique_input != n) {
511 unique_input = top;
512 break;
513 }
514 }
515 if (unique_input != nullptr && unique_input != top) {
516 mem = unique_input;
517 } else {
518 done = true;
519 }
520 } else if (mem->is_ArrayCopy()) {
521 done = true;
522 } else {
523 DEBUG_ONLY( mem->dump(); )
524 assert(false, "unexpected node");
525 }
526 }
527 if (mem != nullptr) {
528 if (mem == start_mem || mem == alloc_mem) {
529 // hit a sentinel, return appropriate 0 value
530 Node* default_value = alloc->in(AllocateNode::DefaultValue);
531 if (default_value != nullptr) {
532 return default_value;
533 }
534 assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
535 return _igvn.zerocon(ft);
536 } else if (mem->is_Store()) {
537 Node* n = mem->in(MemNode::ValueIn);
538 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
539 n = bs->step_over_gc_barrier(n);
540 return n;
541 } else if (mem->is_Phi()) {
542 // attempt to produce a Phi reflecting the values on the input paths of the Phi
543 Node_Stack value_phis(8);
544 Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
545 if (phi != nullptr) {
546 return phi;
547 } else {
548 // Kill all new Phis
549 while(value_phis.is_nonempty()) {
550 Node* n = value_phis.node();
551 _igvn.replace_node(n, C->top());
552 value_phis.pop();
553 }
554 }
555 } else if (mem->is_ArrayCopy()) {
556 Node* ctl = mem->in(0);
557 Node* m = mem->in(TypeFunc::Memory);
558 if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj()) {
559 // pin the loads in the uncommon trap path
560 ctl = sfpt_ctl;
561 m = sfpt_mem;
562 }
563 return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
564 }
565 }
566 // Something went wrong.
567 return nullptr;
568 }
569
570 // Search the last value stored into the inline type's fields.
571 Node* PhaseMacroExpand::inline_type_from_mem(Node* mem, Node* ctl, ciInlineKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
572 // Subtract the offset of the first field to account for the missing oop header
573 offset -= vk->first_field_offset();
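// E.g. (hypothetical layout): with vk->first_field_offset() == 16, a field
// found at offset 16 in the buffered form maps to field offset 0 here.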
574 // Create a new InlineTypeNode and retrieve the field values from memory
575 InlineTypeNode* vt = InlineTypeNode::make_uninitialized(_igvn, vk);
576 transform_later(vt);
577 for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
578 ciType* field_type = vt->field_type(i);
579 int field_offset = offset + vt->field_offset(i);
580 Node* value = nullptr;
581 if (vt->field_is_flat(i)) {
582 value = inline_type_from_mem(mem, ctl, field_type->as_inline_klass(), adr_type, field_offset, alloc);
583 } else {
584 const Type* ft = Type::get_const_type(field_type);
585 BasicType bt = type2field[field_type->basic_type()];
586 if (UseCompressedOops && !is_java_primitive(bt)) {
587 ft = ft->make_narrowoop();
588 bt = T_NARROWOOP;
589 }
590 // Each inline type field has its own memory slice
591 adr_type = adr_type->with_field_offset(field_offset);
592 value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
593 if (value != nullptr && ft->isa_narrowoop()) {
594 assert(UseCompressedOops, "unexpected narrow oop");
595 if (value->is_EncodeP()) {
596 value = value->in(1);
597 } else {
598 value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
599 }
600 }
601 }
602 if (value != nullptr) {
603 vt->set_field_value(i, value);
604 } else {
605 // We might have reached the TrackedInitializationLimit
606 return nullptr;
607 }
608 }
609 return vt;
610 }
611
612 // Check the possibility of scalar replacement.
613 bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
614 // Scan the uses of the allocation to check for anything that would
615 // prevent us from eliminating it.
616 NOT_PRODUCT( const char* fail_eliminate = nullptr; )
617 DEBUG_ONLY( Node* disq_node = nullptr; )
618 bool can_eliminate = true;
619 bool reduce_merge_precheck = (safepoints == nullptr);
620
621 Unique_Node_List worklist;
622 Node* res = alloc->result_cast();
623 const TypeOopPtr* res_type = nullptr;
624 if (res == nullptr) {
625 // All users were eliminated.
626 } else if (!res->is_CheckCastPP()) {
627 NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
628 can_eliminate = false;
629 } else {
630 worklist.push(res);
631 res_type = igvn->type(res)->isa_oopptr();
632 if (res_type == nullptr) {
633       NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
634 can_eliminate = false;
635 } else if (res_type->isa_aryptr()) {
636 int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
637 if (length < 0) {
638 NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
639 can_eliminate = false;
640 }
641 }
642 }
643
644 while (can_eliminate && worklist.size() > 0) {
645 BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
646 res = worklist.pop();
647 for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax && can_eliminate; j++) {
648 Node* use = res->fast_out(j);
649
650 if (use->is_AddP()) {
651 const TypePtr* addp_type = igvn->type(use)->is_ptr();
652 int offset = addp_type->offset();
653
654 if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
655 NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
656 can_eliminate = false;
657 break;
658 }
659 for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
660 k < kmax && can_eliminate; k++) {
661 Node* n = use->fast_out(k);
662 if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
663 DEBUG_ONLY(disq_node = n;)
664 if (n->is_Load() || n->is_LoadStore()) {
665 NOT_PRODUCT(fail_eliminate = "Field load";)
666 } else {
667 NOT_PRODUCT(fail_eliminate = "Not store field reference";)
673 (use->as_ArrayCopy()->is_clonebasic() ||
674 use->as_ArrayCopy()->is_arraycopy_validated() ||
675 use->as_ArrayCopy()->is_copyof_validated() ||
676 use->as_ArrayCopy()->is_copyofrange_validated()) &&
677 use->in(ArrayCopyNode::Dest) == res) {
678 // ok to eliminate
679 } else if (use->is_SafePoint()) {
680 SafePointNode* sfpt = use->as_SafePoint();
681 if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
682 // Object is passed as argument.
683 DEBUG_ONLY(disq_node = use;)
684 NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
685 can_eliminate = false;
686 }
687 Node* sfptMem = sfpt->memory();
688 if (sfptMem == nullptr || sfptMem->is_top()) {
689 DEBUG_ONLY(disq_node = use;)
690 NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
691 can_eliminate = false;
692 } else if (!reduce_merge_precheck) {
693 if (res->is_Phi() && res->as_Phi()->can_be_inline_type()) {
694             // We can only eliminate the allocation if the phi was replaced by an InlineTypeNode earlier, which did not happen here.
695 // TODO 8325106 Why wasn't it replaced by an InlineTypeNode?
696 can_eliminate = false;
697 }
698 safepoints->append_if_missing(sfpt);
699 }
700 } else if (use->is_InlineType() && use->as_InlineType()->get_oop() == res) {
701 // Look at uses
702 for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
703 Node* u = use->fast_out(k);
704 if (u->is_InlineType()) {
705 // Use in flat field can be eliminated
706 InlineTypeNode* vt = u->as_InlineType();
707 for (uint i = 0; i < vt->field_count(); ++i) {
708 if (vt->field_value(i) == use && !vt->field_is_flat(i)) {
709 can_eliminate = false; // Use in non-flat field
710 break;
711 }
712 }
713 } else {
714 // Add other uses to the worklist to process individually
715 // TODO will be fixed by 8328470
716 worklist.push(use);
717 }
718 }
719 } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
720 // Store to mark word of inline type larval buffer
721 assert(res_type->is_inlinetypeptr(), "Unexpected store to mark word");
722 } else if (res_type->is_inlinetypeptr() && use->Opcode() == Op_MemBarRelease) {
723 // Inline type buffer allocations are followed by a membar
724 } else if (reduce_merge_precheck && (use->is_Phi() || use->is_EncodeP() || use->Opcode() == Op_MemBarRelease)) {
725 // Nothing to do
726 } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
727 if (use->is_Phi()) {
728 if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
729 NOT_PRODUCT(fail_eliminate = "Object is return value";)
730 } else {
731 NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
732 }
733 DEBUG_ONLY(disq_node = use;)
734 } else {
735 if (use->Opcode() == Op_Return) {
736 NOT_PRODUCT(fail_eliminate = "Object is return value";)
737 } else {
738 NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
739 }
740 DEBUG_ONLY(disq_node = use;)
741 }
742 can_eliminate = false;
743 } else {
744 assert(use->Opcode() == Op_CastP2X, "should be");
745 assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null");
746 }
747 }
748 }
749
750 #ifndef PRODUCT
751 if (PrintEliminateAllocations && safepoints != nullptr) {
752 if (can_eliminate) {
753 tty->print("Scalar ");
754 if (res == nullptr)
755 alloc->dump();
756 else
757 res->dump();
758 } else {
759 tty->print("NotScalar (%s)", fail_eliminate);
760 if (res == nullptr)
761 alloc->dump();
762 else
763 res->dump();
764 #ifdef ASSERT
765 if (disq_node != nullptr) {
766 tty->print(" >>>> ");
767 disq_node->dump();
768 }
769 #endif /*ASSERT*/
770 }
771 }
772
773 if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
774 tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
775 DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
776 }
777 #endif
778 return can_eliminate;
808 JVMState *jvms = sfpt_done->jvms();
809 jvms->set_endoff(sfpt_done->req());
810 // Now make a pass over the debug information replacing any references
811 // to SafePointScalarObjectNode with the allocated object.
812 int start = jvms->debug_start();
813 int end = jvms->debug_end();
814 for (int i = start; i < end; i++) {
815 if (sfpt_done->in(i)->is_SafePointScalarObject()) {
816 SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
817 if (scobj->first_index(jvms) == sfpt_done->req() &&
818 scobj->n_fields() == (uint)nfields) {
819 assert(scobj->alloc() == alloc, "sanity");
820 sfpt_done->set_req(i, res);
821 }
822 }
823 }
824 _igvn._worklist.push(sfpt_done);
825 }
826 }
827
828 SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt,
829 Unique_Node_List* value_worklist) {
830 // Fields of scalar objs are referenced only at the end
831 // of regular debuginfo at the last (youngest) JVMS.
832 // Record relative start index.
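// Debug-info layout reminder (simplified sketch): a safepoint's inputs are
//   [fixed inputs | locals | expression stack | monitors | scalarized fields]
// and first_ind below records, relative to scloff, where this object's
// field values will start once they are appended.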
833 ciInstanceKlass* iklass = nullptr;
834 BasicType basic_elem_type = T_ILLEGAL;
835 const Type* field_type = nullptr;
836 const TypeOopPtr* res_type = nullptr;
837 int nfields = 0;
838 int array_base = 0;
839 int element_size = 0;
840 uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
841 Node* res = alloc->result_cast();
842
843 assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
844 assert(sfpt->jvms() != nullptr, "missed JVMS");
845
846 if (res != nullptr) { // Could be null when there are no users
847 res_type = _igvn.type(res)->isa_oopptr();
848
849 if (res_type->isa_instptr()) {
850 // find the fields of the class which will be needed for safepoint debug information
851 iklass = res_type->is_instptr()->instance_klass();
852 nfields = iklass->nof_nonstatic_fields();
853 } else {
854 // find the array's elements which will be needed for safepoint debug information
855 nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
856 assert(nfields >= 0, "must be an array klass.");
857 basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
858 array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
859 element_size = type2aelembytes(basic_elem_type);
860 field_type = res_type->is_aryptr()->elem();
861 if (res_type->is_flat()) {
862 // Flat inline type array
863 element_size = res_type->is_aryptr()->flat_elem_size();
864 }
865 }
866 }
867
868 SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
869 sobj->init_req(0, C->root());
870 transform_later(sobj);
871
872 // Scan object's fields adding an input to the safepoint for each field.
873 for (int j = 0; j < nfields; j++) {
874 intptr_t offset;
875 ciField* field = nullptr;
876 if (iklass != nullptr) {
877 field = iklass->nonstatic_field_at(j);
878 offset = field->offset_in_bytes();
879 ciType* elem_type = field->type();
880 basic_elem_type = field->layout_type();
881 assert(!field->is_flat(), "flat inline type fields should not have safepoint uses");
882
883 // The next code is taken from Parse::do_get_xxx().
884 if (is_reference_type(basic_elem_type)) {
885 if (!elem_type->is_loaded()) {
886 field_type = TypeInstPtr::BOTTOM;
887 } else if (field != nullptr && field->is_static_constant()) {
888 ciObject* con = field->constant_value().as_object();
889 // Do not "join" in the previous type; it doesn't add value,
890 // and may yield a vacuous result if the field is of interface type.
891 field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
892 assert(field_type != nullptr, "field singleton type must be consistent");
893 } else {
894 field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
895 }
896 if (UseCompressedOops) {
897 field_type = field_type->make_narrowoop();
898 basic_elem_type = T_NARROWOOP;
899 }
900 } else {
901 field_type = Type::get_const_basic_type(basic_elem_type);
902 }
903 } else {
904 offset = array_base + j * (intptr_t)element_size;
905 }
906
907 Node* field_val = nullptr;
908 const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr();
909 if (res_type->is_flat()) {
910 ciInlineKlass* inline_klass = res_type->is_aryptr()->elem()->inline_klass();
911 assert(inline_klass->flat_in_array(), "must be flat in array");
912 field_val = inline_type_from_mem(sfpt->memory(), sfpt->control(), inline_klass, field_addr_type->isa_aryptr(), 0, alloc);
913 } else {
914 field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);
915 }
916
917 // We weren't able to find a value for this field,
918 // give up on eliminating this allocation.
919 if (field_val == nullptr) {
920 uint last = sfpt->req() - 1;
921 for (int k = 0; k < j; k++) {
922 sfpt->del_req(last--);
923 }
924 _igvn._worklist.push(sfpt);
925
926 #ifndef PRODUCT
927 if (PrintEliminateAllocations) {
928 if (field != nullptr) {
929 tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
930 field->print();
931 int field_idx = C->get_alias_index(field_addr_type);
932 tty->print(" (alias_idx=%d)", field_idx);
933 } else { // Array's element
934 tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, j);
935 }
936 tty->print(", which prevents elimination of: ");
937 if (res == nullptr)
938 alloc->dump();
939 else
940 res->dump();
941 }
942 #endif
943
944 return nullptr;
945 }
946
947 if (UseCompressedOops && field_type->isa_narrowoop()) {
948 // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
949       // to be able to scalar-replace the allocation.
950 if (field_val->is_EncodeP()) {
951 field_val = field_val->in(1);
952 } else if (!field_val->is_InlineType()) {
953 field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
954 }
955 }
956
957 // Keep track of inline types to scalarize them later
958 if (field_val->is_InlineType()) {
959 value_worklist->push(field_val);
960 } else if (field_val->is_Phi()) {
961 PhiNode* phi = field_val->as_Phi();
962 // Eagerly replace inline type phis now since we could be removing an inline type allocation where we must
963 // scalarize all its fields in safepoints.
964 field_val = phi->try_push_inline_types_down(&_igvn, true);
965 if (field_val->is_InlineType()) {
966 value_worklist->push(field_val);
967 }
968 }
969 sfpt->add_req(field_val);
970 }
971
972 sfpt->jvms()->set_endoff(sfpt->req());
973
974 return sobj;
975 }
976
977 // Do scalar replacement.
978 bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
979 GrowableArray <SafePointNode *> safepoints_done;
980 Node* res = alloc->result_cast();
981 assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
982 const TypeOopPtr* res_type = nullptr;
983 if (res != nullptr) { // Could be null when there are no users
984 res_type = _igvn.type(res)->isa_oopptr();
985 }
986
987 // Process the safepoint uses
988 assert(safepoints.length() == 0 || !res_type->is_inlinetypeptr(), "Inline type allocations should not have safepoint uses");
989 Unique_Node_List value_worklist;
990 while (safepoints.length() > 0) {
991 SafePointNode* sfpt = safepoints.pop();
992 SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt, &value_worklist);
993
994 if (sobj == nullptr) {
995 undo_previous_scalarizations(safepoints_done, alloc);
996 return false;
997 }
998
999 // Now make a pass over the debug information replacing any references
1000 // to the allocated object with "sobj"
1001 JVMState *jvms = sfpt->jvms();
1002 sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
1003 _igvn._worklist.push(sfpt);
1004
1005 // keep it for rollback
1006 safepoints_done.append_if_missing(sfpt);
1007 }
1008 // Scalarize inline types that were added to the safepoint.
1009 // Don't allow linking a constant oop (if available) for flat array elements
1010 // because Deoptimization::reassign_flat_array_elements needs field values.
1011 bool allow_oop = (res_type != nullptr) && !res_type->is_flat();
1012 for (uint i = 0; i < value_worklist.size(); ++i) {
1013 InlineTypeNode* vt = value_worklist.at(i)->as_InlineType();
1014 vt->make_scalar_in_safepoints(&_igvn, allow_oop);
1015 }
1016 return true;
1017 }
1018
1019 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
1020 Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
1021 Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
1022 if (ctl_proj != nullptr) {
1023 igvn.replace_node(ctl_proj, n->in(0));
1024 }
1025 if (mem_proj != nullptr) {
1026 igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
1027 }
1028 }
1029
1030 // Process users of eliminated allocation.
1031 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc, bool inline_alloc) {
1032 Unique_Node_List worklist;
1033 Node* res = alloc->result_cast();
1034 if (res != nullptr) {
1035 worklist.push(res);
1036 }
1037 while (worklist.size() > 0) {
1038 res = worklist.pop();
1039 for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
1040 Node *use = res->last_out(j);
1041 uint oc1 = res->outcnt();
1042
1043 if (use->is_AddP()) {
1044 for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
1045 Node *n = use->last_out(k);
1046 uint oc2 = use->outcnt();
1047 if (n->is_Store()) {
1048 for (DUIterator_Fast pmax, p = n->fast_outs(pmax); p < pmax; p++) {
1049 MemBarNode* mb = n->fast_out(p)->isa_MemBar();
1050 if (mb != nullptr && mb->req() <= MemBarNode::Precedent && mb->in(MemBarNode::Precedent) == n) {
1051 // MemBarVolatiles should have been removed by MemBarNode::Ideal() for non-inline allocations
1052 assert(inline_alloc, "MemBarVolatile should be eliminated for non-escaping object");
1053 mb->remove(&_igvn);
1054 }
1055 }
1056 _igvn.replace_node(n, n->in(MemNode::Memory));
1057 } else {
1058 eliminate_gc_barrier(n);
1059 }
1060 k -= (oc2 - use->outcnt());
1061 }
1062 _igvn.remove_dead_node(use);
1063 } else if (use->is_ArrayCopy()) {
1064 // Disconnect ArrayCopy node
1065 ArrayCopyNode* ac = use->as_ArrayCopy();
1066 if (ac->is_clonebasic()) {
1067 Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
1068 disconnect_projections(ac, _igvn);
1069 assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
1070 Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
1071 disconnect_projections(membar_before->as_MemBar(), _igvn);
1072 if (membar_after->is_MemBar()) {
1073 disconnect_projections(membar_after->as_MemBar(), _igvn);
1074 }
1075 } else {
1076 assert(ac->is_arraycopy_validated() ||
1077 ac->is_copyof_validated() ||
1078 ac->is_copyofrange_validated(), "unsupported");
1079 CallProjections* callprojs = ac->extract_projections(true);
1080
1081 _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O));
1082 _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory));
1083 _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control));
1084
1085 // Set control to top. IGVN will remove the remaining projections
1086 ac->set_req(0, top());
1087 ac->replace_edge(res, top(), &_igvn);
1088
1089 // Disconnect src right away: it can help find new
1090 // opportunities for allocation elimination
1091 Node* src = ac->in(ArrayCopyNode::Src);
1092 ac->replace_edge(src, top(), &_igvn);
1093 // src can be top at this point if src and dest of the
1094 // arraycopy were the same
1095 if (src->outcnt() == 0 && !src->is_top()) {
1096 _igvn.remove_dead_node(src);
1097 }
1098 }
1099 _igvn._worklist.push(ac);
1100 } else if (use->is_InlineType()) {
1101 assert(use->as_InlineType()->get_oop() == res, "unexpected inline type ptr use");
1102 // Cut off oop input and remove known instance id from type
1103 _igvn.rehash_node_delayed(use);
1104 use->as_InlineType()->set_oop(_igvn, _igvn.zerocon(T_OBJECT));
1105 const TypeOopPtr* toop = _igvn.type(use)->is_oopptr()->cast_to_instance_id(TypeOopPtr::InstanceBot);
1106 _igvn.set_type(use, toop);
1107 use->as_InlineType()->set_type(toop);
1108 // Process users
1109 for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
1110 Node* u = use->fast_out(k);
1111 if (!u->is_InlineType()) {
1112 worklist.push(u);
1113 }
1114 }
1115 } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
1116 // Store to mark word of inline type larval buffer
1117 assert(inline_alloc, "Unexpected store to mark word");
1118 _igvn.replace_node(use, use->in(MemNode::Memory));
1119 } else if (use->Opcode() == Op_MemBarRelease) {
1120 // Inline type buffer allocations are followed by a membar
1121 assert(inline_alloc, "Unexpected MemBarRelease");
1122 use->as_MemBar()->remove(&_igvn);
1123 } else {
1124 eliminate_gc_barrier(use);
1125 }
1126 j -= (oc1 - res->outcnt());
1127 }
1128 assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1129 _igvn.remove_dead_node(res);
1130 }
1131
1132 //
1133 // Process other users of allocation's projections
1134 //
1135 if (_callprojs->resproj[0] != nullptr && _callprojs->resproj[0]->outcnt() != 0) {
1136 // First disconnect stores captured by Initialize node.
1137 // If Initialize node is eliminated first in the following code,
1138 // it will kill such stores and DUIterator_Last will assert.
1139 for (DUIterator_Fast jmax, j = _callprojs->resproj[0]->fast_outs(jmax); j < jmax; j++) {
1140 Node* use = _callprojs->resproj[0]->fast_out(j);
1141 if (use->is_AddP()) {
1142 // raw memory addresses used only by the initialization
1143 _igvn.replace_node(use, C->top());
1144 --j; --jmax;
1145 }
1146 }
1147 for (DUIterator_Last jmin, j = _callprojs->resproj[0]->last_outs(jmin); j >= jmin; ) {
1148 Node* use = _callprojs->resproj[0]->last_out(j);
1149 uint oc1 = _callprojs->resproj[0]->outcnt();
1150 if (use->is_Initialize()) {
1151 // Eliminate Initialize node.
1152 InitializeNode *init = use->as_Initialize();
1153 assert(init->outcnt() <= 2, "only a control and memory projection expected");
1154 Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
1155 if (ctrl_proj != nullptr) {
1156 _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
1157 #ifdef ASSERT
1158 // If the InitializeNode has no memory out, it will die, and tmp will become null
1159 Node* tmp = init->in(TypeFunc::Control);
1160 assert(tmp == nullptr || tmp == _callprojs->fallthrough_catchproj, "allocation control projection");
1161 #endif
1162 }
1163 Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
1164 if (mem_proj != nullptr) {
1165 Node *mem = init->in(TypeFunc::Memory);
1166 #ifdef ASSERT
1167 if (mem->is_MergeMem()) {
1168 assert(mem->in(TypeFunc::Memory) == _callprojs->fallthrough_memproj, "allocation memory projection");
1169 } else {
1170 assert(mem == _callprojs->fallthrough_memproj, "allocation memory projection");
1171 }
1172 #endif
1173 _igvn.replace_node(mem_proj, mem);
1174 }
1175 } else if (use->Opcode() == Op_MemBarStoreStore) {
1176 // Inline type buffer allocations are followed by a membar
1177 assert(inline_alloc, "Unexpected MemBarStoreStore");
1178 use->as_MemBar()->remove(&_igvn);
1179 } else {
1180 assert(false, "only Initialize or AddP expected");
1181 }
1182 j -= (oc1 - _callprojs->resproj[0]->outcnt());
1183 }
1184 }
1185 if (_callprojs->fallthrough_catchproj != nullptr) {
1186 _igvn.replace_node(_callprojs->fallthrough_catchproj, alloc->in(TypeFunc::Control));
1187 }
1188 if (_callprojs->fallthrough_memproj != nullptr) {
1189 _igvn.replace_node(_callprojs->fallthrough_memproj, alloc->in(TypeFunc::Memory));
1190 }
1191 if (_callprojs->catchall_memproj != nullptr) {
1192 _igvn.replace_node(_callprojs->catchall_memproj, C->top());
1193 }
1194 if (_callprojs->fallthrough_ioproj != nullptr) {
1195 _igvn.replace_node(_callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
1196 }
1197 if (_callprojs->catchall_ioproj != nullptr) {
1198 _igvn.replace_node(_callprojs->catchall_ioproj, C->top());
1199 }
1200 if (_callprojs->catchall_catchproj != nullptr) {
1201 _igvn.replace_node(_callprojs->catchall_catchproj, C->top());
1202 }
1203 }
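
// Illustrative example (hypothetical, not from this file) of the kind of
// source-level allocation the elimination above retires. Whether it actually
// applies is decided by escape analysis, not by source shape:
//
//   Point p = new Point(x, y);   // p never escapes this method
//   return p.x + p.y;            // field values are forwarded directly, so
//                                // the Allocate and its projections die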
1204
1205 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1206   // If reallocation fails during deoptimization, we'll pop all
1207   // interpreter frames for this compiled frame, and that won't play
1208   // nicely with JVMTI popframe.
1209   // We avoid this issue by reallocating eagerly when the popframe
1210   // request is received.
1211 if (!EliminateAllocations) {
1212 return false;
1213 }
1214 Node* klass = alloc->in(AllocateNode::KlassNode);
1215 const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1216
1217 // Attempt to eliminate inline type buffer allocations
1218 // regardless of usage and escape/replaceable status.
1219 bool inline_alloc = tklass->isa_instklassptr() &&
1220 tklass->is_instklassptr()->instance_klass()->is_inlinetype();
1221 if (!alloc->_is_non_escaping && !inline_alloc) {
1222 return false;
1223 }
1224   // Eliminate boxing allocations that are not used,
1225   // regardless of their scalar-replaceable status.
1226 Node* res = alloc->result_cast();
1227 bool boxing_alloc = (res == nullptr) && C->eliminate_boxing() &&
1228 tklass->isa_instklassptr() &&
1229 tklass->is_instklassptr()->instance_klass()->is_box_klass();
1230 if (!alloc->_is_scalar_replaceable && !boxing_alloc && !inline_alloc) {
1231 return false;
1232 }
1233
1234 _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1235
1236 GrowableArray <SafePointNode *> safepoints;
1237 if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
1238 return false;
1239 }
1240
1241 if (!alloc->_is_scalar_replaceable) {
1242 assert(res == nullptr || inline_alloc, "sanity");
1243     // We can only eliminate the allocation if all debug-info references
1244     // have already been replaced with SafePointScalarObject, because
1245     // we can't search for a field's value without an instance_id.
1246 if (safepoints.length() > 0) {
1247 assert(!inline_alloc, "Inline type allocations should not have safepoint uses");
1248 return false;
1249 }
1250 }
1251
1252 if (!scalar_replacement(alloc, safepoints)) {
1253 return false;
1254 }
1255
1256 CompileLog* log = C->log();
1257 if (log != nullptr) {
1258 log->head("eliminate_allocation type='%d'",
1259 log->identify(tklass->exact_klass()));
1260 JVMState* p = alloc->jvms();
1261 while (p != nullptr) {
1262 log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1263 p = p->caller();
1264 }
1265 log->tail("eliminate_allocation");
1266 }
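  // With -XX:+LogCompilation, the block above emits output of roughly this
  // shape (element names from the calls above; attribute values illustrative):
  //
  //   <eliminate_allocation type='817'>
  //     <jvms bci='4' method='818'/>   <!-- one element per inlining level -->
  //   </eliminate_allocation>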
1267
1268 process_users_of_allocation(alloc, inline_alloc);
1269
1270 #ifndef PRODUCT
1271 if (PrintEliminateAllocations) {
1272 if (alloc->is_AllocateArray())
1273 tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1274 else
1275 tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1276 }
1277 #endif
1278
1279 return true;
1280 }
1281
1282 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1283 // EA should remove all uses of non-escaping boxing node.
1284 if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
1285 return false;
1286 }
1287
1288 assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
1289
1290 _callprojs = boxing->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1291
1292 const TypeTuple* r = boxing->tf()->range_sig();
1293 assert(r->cnt() > TypeFunc::Parms, "sanity");
1294 const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1295 assert(t != nullptr, "sanity");
1296
1297 CompileLog* log = C->log();
1298 if (log != nullptr) {
1299 log->head("eliminate_boxing type='%d'",
1300 log->identify(t->instance_klass()));
1301 JVMState* p = boxing->jvms();
1302 while (p != nullptr) {
1303 log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1304 p = p->caller();
1305 }
1306 log->tail("eliminate_boxing");
1307 }
1308
1309 process_users_of_allocation(boxing);
1310
1311 #ifndef PRODUCT
1312 if (PrintEliminateAllocations) {
1456 }
1457 }
1458 #endif
1459 yank_alloc_node(alloc);
1460 return;
1461 }
1462 }
1463
1464 enum { too_big_or_final_path = 1, need_gc_path = 2 };
1465 Node *slow_region = nullptr;
1466 Node *toobig_false = ctrl;
1467
1468 // generate the initial test if necessary
1469   if (initial_slow_test != nullptr) {
1470 assert (expand_fast_path, "Only need test if there is a fast path");
1471 slow_region = new RegionNode(3);
1472
1473 // Now make the initial failure test. Usually a too-big test but
1474 // might be a TRUE for finalizers or a fancy class check for
1475 // newInstance0.
1476 IfNode* toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1477 transform_later(toobig_iff);
1478 // Plug the failing-too-big test into the slow-path region
1479 Node* toobig_true = new IfTrueNode(toobig_iff);
1480 transform_later(toobig_true);
1481     slow_region->init_req(too_big_or_final_path, toobig_true);
1482 toobig_false = new IfFalseNode(toobig_iff);
1483 transform_later(toobig_false);
1484 } else {
1485 // No initial test, just fall into next case
1486 assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
1487 toobig_false = ctrl;
1488 debug_only(slow_region = NodeSentinel);
1489 }
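
  // Sketch of the control flow built above when initial_slow_test is present
  // (names as in this function; a rough picture, not an exact IR dump):
  //
  //            ctrl
  //              |
  //         toobig_iff
  //          /        \
  //   toobig_true   toobig_false ---> fast path (below)
  //        |
  //   slow_region[too_big_or_final_path]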
1490
1491   // If we are here, there are several possibilities:
1492   // - expand_fast_path is false: only a slow path is expanded. That's it.
1493   //   no_initial_check means a constant allocation.
1494   //   - If the check always evaluates to false -> expand_fast_path is false (see above).
1495   //   - If the check always evaluates to true -> go directly into the fast path (which may still bail out to the slow path).
1496   //   If !allocation_has_use, the fast path is empty.
1497   //   If !allocation_has_use && no_initial_check, then there is no fast path
1498   //     that can fall out to the slow path -> no allocation code at all;
1499   //     it was removed by yank_alloc_node above.
1500
1501 Node *slow_mem = mem; // save the current memory state for slow path
1502 // generate the fast allocation code unless we know that the initial test will always go slow
1503 if (expand_fast_path) {
1504 // Fast path modifies only raw memory.
1505 if (mem->is_MergeMem()) {
1506 mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1507 }
1508
1509 // allocate the Region and Phi nodes for the result
1510 result_region = new RegionNode(3);
1511 result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1512 result_phi_i_o = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1513
1514 // Grab regular I/O before optional prefetch may change it.
1515 // Slow-path does no I/O so just set it to the original I/O.
1516 result_phi_i_o->init_req(slow_result_path, i_o);
1517
1518 // Name successful fast-path variables
1519 Node* fast_oop_ctrl;
1520 Node* fast_oop_rawmem;
1521
1522 if (allocation_has_use) {
1523 Node* needgc_ctrl = nullptr;
1524 result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1525
1526 intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1527 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1528 Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1529 fast_oop_ctrl, fast_oop_rawmem,
1530 prefetch_lines);
1531
1532 if (initial_slow_test != nullptr) {
1533 // This completes all paths into the slow merge point
1534 slow_region->init_req(need_gc_path, needgc_ctrl);
1535 transform_later(slow_region);
1536 } else {
1537 // No initial slow path needed!
1538 // Just fall from the need-GC path straight into the VM call.
1539 slow_region = needgc_ctrl;
1540 }
1541
1559 result_phi_i_o ->init_req(fast_result_path, i_o);
1560 result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
1561 } else {
1562 slow_region = ctrl;
1563 result_phi_i_o = i_o; // Rename it to use in the following code.
1564 }
1565
1566 // Generate slow-path call
1567 CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
1568 OptoRuntime::stub_name(slow_call_address),
1569 TypePtr::BOTTOM);
1570 call->init_req(TypeFunc::Control, slow_region);
1571 call->init_req(TypeFunc::I_O, top()); // does no i/o
1572 call->init_req(TypeFunc::Memory, slow_mem); // may gc ptrs
1573 call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1574 call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
1575
1576 call->init_req(TypeFunc::Parms+0, klass_node);
1577 if (length != nullptr) {
1578 call->init_req(TypeFunc::Parms+1, length);
1579 } else {
1580 // Let the runtime know if this is a larval allocation
1581 call->init_req(TypeFunc::Parms+1, _igvn.intcon(alloc->_larval));
1582 }
1583
1584 // Copy debug information and adjust JVMState information, then replace
1585 // allocate node with the call
1586 call->copy_call_debug_info(&_igvn, alloc);
1587 // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
1588 // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
1589 // path dies).
1590 if (valid_length_test != nullptr) {
1591 call->add_req(valid_length_test);
1592 }
1593 if (expand_fast_path) {
1594 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
1595 } else {
1596 // Hook i_o projection to avoid its elimination during allocation
1597 // replacement (when only a slow call is generated).
1598 call->set_req(TypeFunc::I_O, result_phi_i_o);
1599 }
1600 _igvn.replace_node(alloc, call);
1601 transform_later(call);
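
  // Note on the wiring above: Parms+0 carries the klass; Parms+1 carries the
  // array length or, for instance allocations, an int flag marking a larval
  // inline-type allocation, exactly as initialized a few lines earlier.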
1602
1603 // Identify the output projections from the allocate node and
1604 // adjust any references to them.
1605 // The control and io projections look like:
1606 //
1607 // v---Proj(ctrl) <-----+ v---CatchProj(ctrl)
1608 // Allocate Catch
1609 // ^---Proj(io) <-------+ ^---CatchProj(io)
1610 //
1611 // We are interested in the CatchProj nodes.
1612 //
1613 _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1614
1615   // An allocate node has separate memory projections for the uses on
1616   // the control and i_o paths. Replace the control memory projection with
1617   // result_phi_rawmem (unless we are only generating a slow call, in which
1618   // case both memory projections are combined).
1619 if (expand_fast_path && _callprojs->fallthrough_memproj != nullptr) {
1620 _igvn.replace_in_uses(_callprojs->fallthrough_memproj, result_phi_rawmem);
1621 }
1622 // Now change uses of catchall_memproj to use fallthrough_memproj and delete
1623 // catchall_memproj so we end up with a call that has only 1 memory projection.
1624 if (_callprojs->catchall_memproj != nullptr) {
1625 if (_callprojs->fallthrough_memproj == nullptr) {
1626 _callprojs->fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
1627 transform_later(_callprojs->fallthrough_memproj);
1628 }
1629 _igvn.replace_in_uses(_callprojs->catchall_memproj, _callprojs->fallthrough_memproj);
1630 _igvn.remove_dead_node(_callprojs->catchall_memproj);
1631 }
1632
1633   // An allocate node has separate i_o projections for the uses on the control
1634   // and i_o paths. Always replace the control i_o projection with the result
1635   // i_o, otherwise the incoming i_o becomes dead when only a slow call is
1636   // generated (unlike the memory projections, which are combined into one
1637   // in that case).
1638 if (_callprojs->fallthrough_ioproj != nullptr) {
1639 _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, result_phi_i_o);
1640 }
1641 // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
1642 // catchall_ioproj so we end up with a call that has only 1 i_o projection.
1643 if (_callprojs->catchall_ioproj != nullptr) {
1644 if (_callprojs->fallthrough_ioproj == nullptr) {
1645 _callprojs->fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
1646 transform_later(_callprojs->fallthrough_ioproj);
1647 }
1648 _igvn.replace_in_uses(_callprojs->catchall_ioproj, _callprojs->fallthrough_ioproj);
1649 _igvn.remove_dead_node(_callprojs->catchall_ioproj);
1650 }
1651
1652 // if we generated only a slow call, we are done
1653 if (!expand_fast_path) {
1654 // Now we can unhook i_o.
1655 if (result_phi_i_o->outcnt() > 1) {
1656 call->set_req(TypeFunc::I_O, top());
1657 } else {
1658 assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
1659       // Case of a new array whose negative size is known during compilation.
1660       // The AllocateArrayNode::Ideal() optimization disconnects the unreachable
1661       // code that follows, since the runtime call will throw an exception.
1662       // As a result there will be no users of i_o after the call.
1663       // Leave i_o attached to this call to avoid problems in the preceding graph.
1664 }
1665 return;
1666 }
1667
1668 if (_callprojs->fallthrough_catchproj != nullptr) {
1669 ctrl = _callprojs->fallthrough_catchproj->clone();
1670 transform_later(ctrl);
1671 _igvn.replace_node(_callprojs->fallthrough_catchproj, result_region);
1672 } else {
1673 ctrl = top();
1674 }
1675 Node *slow_result;
1676 if (_callprojs->resproj[0] == nullptr) {
1677 // no uses of the allocation result
1678 slow_result = top();
1679 } else {
1680 slow_result = _callprojs->resproj[0]->clone();
1681 transform_later(slow_result);
1682 _igvn.replace_node(_callprojs->resproj[0], result_phi_rawoop);
1683 }
1684
1685 // Plug slow-path into result merge point
1686 result_region->init_req( slow_result_path, ctrl);
1687 transform_later(result_region);
1688 if (allocation_has_use) {
1689 result_phi_rawoop->init_req(slow_result_path, slow_result);
1690 transform_later(result_phi_rawoop);
1691 }
1692 result_phi_rawmem->init_req(slow_result_path, _callprojs->fallthrough_memproj);
1693 transform_later(result_phi_rawmem);
1694 transform_later(result_phi_i_o);
1695 // This completes all paths into the result merge point
1696 }
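
// Resulting shape after expansion with a fast path, roughly (a sketch; the
// exact graph depends on the barrier set and on allocation_has_use):
//
//      fast_oop_ctrl        cloned CatchProj (slow path)
//             \                  /
//             result_region
//      result_phi_rawoop / _rawmem / _i_o select the matching
//      fast- or slow-path values for downstream users.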
1697
1698 // Remove alloc node that has no uses.
1699 void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
1700 Node* ctrl = alloc->in(TypeFunc::Control);
1701 Node* mem = alloc->in(TypeFunc::Memory);
1702 Node* i_o = alloc->in(TypeFunc::I_O);
1703
1704 _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1705 if (_callprojs->resproj[0] != nullptr) {
1706 for (DUIterator_Fast imax, i = _callprojs->resproj[0]->fast_outs(imax); i < imax; i++) {
1707 Node* use = _callprojs->resproj[0]->fast_out(i);
1708 use->isa_MemBar()->remove(&_igvn);
1709 --imax;
1710 --i; // back up iterator
1711 }
1712 assert(_callprojs->resproj[0]->outcnt() == 0, "all uses must be deleted");
1713 _igvn.remove_dead_node(_callprojs->resproj[0]);
1714 }
1715 if (_callprojs->fallthrough_catchproj != nullptr) {
1716 _igvn.replace_in_uses(_callprojs->fallthrough_catchproj, ctrl);
1717 _igvn.remove_dead_node(_callprojs->fallthrough_catchproj);
1718 }
1719 if (_callprojs->catchall_catchproj != nullptr) {
1720 _igvn.rehash_node_delayed(_callprojs->catchall_catchproj);
1721 _callprojs->catchall_catchproj->set_req(0, top());
1722 }
1723 if (_callprojs->fallthrough_proj != nullptr) {
1724 Node* catchnode = _callprojs->fallthrough_proj->unique_ctrl_out();
1725 _igvn.remove_dead_node(catchnode);
1726 _igvn.remove_dead_node(_callprojs->fallthrough_proj);
1727 }
1728 if (_callprojs->fallthrough_memproj != nullptr) {
1729 _igvn.replace_in_uses(_callprojs->fallthrough_memproj, mem);
1730 _igvn.remove_dead_node(_callprojs->fallthrough_memproj);
1731 }
1732 if (_callprojs->fallthrough_ioproj != nullptr) {
1733 _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, i_o);
1734 _igvn.remove_dead_node(_callprojs->fallthrough_ioproj);
1735 }
1736 if (_callprojs->catchall_memproj != nullptr) {
1737 _igvn.rehash_node_delayed(_callprojs->catchall_memproj);
1738 _callprojs->catchall_memproj->set_req(0, top());
1739 }
1740 if (_callprojs->catchall_ioproj != nullptr) {
1741 _igvn.rehash_node_delayed(_callprojs->catchall_ioproj);
1742 _callprojs->catchall_ioproj->set_req(0, top());
1743 }
1744 #ifndef PRODUCT
1745 if (PrintEliminateAllocations) {
1746 if (alloc->is_AllocateArray()) {
1747 tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1748 } else {
1749 tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1750 }
1751 }
1752 #endif
1753 _igvn.remove_dead_node(alloc);
1754 }
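
// yank_alloc_node is reached from expand_allocate_common when the allocation
// result has no uses and no initial test is needed, so no allocation code is
// emitted at all: every projection is rerouted to the Allocate's own inputs
// (the ctrl/mem/i_o captured at the top) and the node itself is removed.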
1755
1756 void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
1757 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
1758   // If initialization is performed by an array copy, any required
1759   // MemBarStoreStore was already added. If the object does not
1760   // escape, there is no need for a MemBarStoreStore. If the object does
1761   // not escape in its initializer and a memory barrier (MemBarStoreStore
1762   // or stronger) is already added at the exit of the initializer, also no need
1840 Node* thread = new ThreadLocalNode();
1841 transform_later(thread);
1842
1843 call->init_req(TypeFunc::Parms + 0, thread);
1844 call->init_req(TypeFunc::Parms + 1, oop);
1845 call->init_req(TypeFunc::Control, ctrl);
1846 call->init_req(TypeFunc::I_O , top()); // does no i/o
1847 call->init_req(TypeFunc::Memory , rawmem);
1848 call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1849 call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
1850 transform_later(call);
1851 ctrl = new ProjNode(call, TypeFunc::Control);
1852 transform_later(ctrl);
1853 rawmem = new ProjNode(call, TypeFunc::Memory);
1854 transform_later(rawmem);
1855 }
1856 }
1857
1858 // Helper for PhaseMacroExpand::expand_allocate_common.
1859 // Initializes the newly-allocated storage.
1860 Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1861 Node* control, Node* rawmem, Node* object,
1862 Node* klass_node, Node* length,
1863 Node* size_in_bytes) {
1864 InitializeNode* init = alloc->initialization();
1865 // Store the klass & mark bits
1866 Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
1867 if (!mark_node->is_Con()) {
1868 transform_later(mark_node);
1869 }
1870 rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
1871
1872 rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1873 int header_size = alloc->minimum_header_size(); // conservatively small
1874
1875 // Array length
1876 if (length != nullptr) { // Arrays need length field
1877 rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1878 // conservatively small header size:
1879 header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1880 if (_igvn.type(klass_node)->isa_aryklassptr()) { // we know the exact header size in most cases:
1881 BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
1882 if (is_reference_type(elem, true)) {
1883 elem = T_OBJECT;
1884 }
1885 header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
1886 }
1887 }
1888
1889 // Clear the object body, if necessary.
1890 if (init == nullptr) {
1891 // The init has somehow disappeared; be cautious and clear everything.
1892 //
1893 // This can happen if a node is allocated but an uncommon trap occurs
1894 // immediately. In this case, the Initialize gets associated with the
1895 // trap, and may be placed in a different (outer) loop, if the Allocate
1896 // is in a loop. If (this is rare) the inner loop gets unrolled, then
1897 // there can be two Allocates to one Initialize. The answer in all these
1898 // edge cases is safety first. It is always safe to clear immediately
1899 // within an Allocate, and then (maybe or maybe not) clear some more later.
1900 if (!(UseTLAB && ZeroTLAB)) {
1901 rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1902 alloc->in(AllocateNode::DefaultValue),
1903 alloc->in(AllocateNode::RawDefaultValue),
1904 header_size, size_in_bytes,
1905 &_igvn);
1906 }
1907 } else {
1908 if (!init->is_complete()) {
1909 // Try to win by zeroing only what the init does not store.
1910 // We can also try to do some peephole optimizations,
1911 // such as combining some adjacent subword stores.
1912 rawmem = init->complete_stores(control, rawmem, object,
1913 header_size, size_in_bytes, &_igvn);
1914 }
1915 // We have no more use for this link, since the AllocateNode goes away:
1916 init->set_req(InitializeNode::RawAddress, top());
1917 // (If we keep the link, it just confuses the register allocator,
1918 // who thinks he sees a real use of the address by the membar.)
1919 }
1920
1921 return rawmem;
1922 }
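
// Header layout written above, in store order (offsets taken from the
// oopDesc / arrayOopDesc accessors used in the code):
//
//   [mark word]   oopDesc::mark_offset_in_bytes()
//   [klass    ]   oopDesc::klass_offset_in_bytes()
//   [length   ]   arrayOopDesc::length_offset_in_bytes()   (arrays only)
//
// followed by clearing of the body: everything if the Initialize node has
// disappeared (unless UseTLAB && ZeroTLAB), or only the gaps the captured
// stores do not cover otherwise. In this (Valhalla) variant, clearing may
// fill with the allocation's default value rather than plain zeros.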
1923
2284 #ifdef ASSERT
2285 if (!alock->is_coarsened()) {
2286 // Check that new "eliminated" BoxLock node is created.
2287 BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2288 assert(oldbox->is_eliminated(), "should be done already");
2289 }
2290 #endif
2291
2292 alock->log_lock_optimization(C, "eliminate_lock");
2293
2294 #ifndef PRODUCT
2295 if (PrintEliminateLocks) {
2296 tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
2297 }
2298 #endif
2299
2300 Node* mem = alock->in(TypeFunc::Memory);
2301 Node* ctrl = alock->in(TypeFunc::Control);
2302 guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
2303
2304 _callprojs = alock->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2305 // There are 2 projections from the lock. The lock node will
2306 // be deleted when its last use is subsumed below.
2307 assert(alock->outcnt() == 2 &&
2308 _callprojs->fallthrough_proj != nullptr &&
2309 _callprojs->fallthrough_memproj != nullptr,
2310 "Unexpected projections from Lock/Unlock");
2311
2312 Node* fallthroughproj = _callprojs->fallthrough_proj;
2313 Node* memproj_fallthrough = _callprojs->fallthrough_memproj;
2314
2315 // The memory projection from a lock/unlock is RawMem
2316 // The input to a Lock is merged memory, so extract its RawMem input
2317   // (unless the MergeMem has been optimized away).
2318 if (alock->is_Lock()) {
2319 // Search for MemBarAcquireLock node and delete it also.
2320 MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
2321 assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
2322 Node* ctrlproj = membar->proj_out(TypeFunc::Control);
2323 Node* memproj = membar->proj_out(TypeFunc::Memory);
2324 _igvn.replace_node(ctrlproj, fallthroughproj);
2325 _igvn.replace_node(memproj, memproj_fallthrough);
2326
2327 // Delete FastLock node also if this Lock node is unique user
2328 // (a loop peeling may clone a Lock node).
2329 Node* flock = alock->as_Lock()->fastlock_node();
2330 if (flock->outcnt() == 1) {
2331 assert(flock->unique_out() == alock, "sanity");
2332 _igvn.replace_node(flock, top());
2333 }
2364 assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2365
2366 // Make the merge point
2367 Node *region;
2368 Node *mem_phi;
2369 Node *slow_path;
2370
2371 region = new RegionNode(3);
2372 // create a Phi for the memory state
2373 mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2374
2375 // Optimize test; set region slot 2
2376 slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
2377 mem_phi->init_req(2, mem);
2378
2379 // Make slow path call
2380 CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
2381 OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
2382 obj, box, nullptr);
2383
2384 _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2385
2386 // Slow path can only throw asynchronous exceptions, which are always
2387 // de-opted. So the compiler thinks the slow-call can never throw an
2388 // exception. If it DOES throw an exception we would need the debug
2389 // info removed first (since if it throws there is no monitor).
2390 assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
2391 _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
2392
2393 // Capture slow path
2394 // disconnect fall-through projection from call and create a new one
2395 // hook up users of fall-through projection to region
2396 Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
2397 transform_later(slow_ctrl);
2398 _igvn.hash_delete(_callprojs->fallthrough_proj);
2399 _callprojs->fallthrough_proj->disconnect_inputs(C);
2400 region->init_req(1, slow_ctrl);
2401 // region inputs are now complete
2402 transform_later(region);
2403 _igvn.replace_node(_callprojs->fallthrough_proj, region);
2404
2405 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2406
2407 mem_phi->init_req(1, memproj);
2408
2409 transform_later(mem_phi);
2410
2411 _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
2412 }
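
// Merge built above for the expanded Lock, roughly:
//
//   region(1)  <- slow_ctrl (clone of the call's fall-through projection)
//   region(2)  <- fast path produced by opt_bits_test
//   mem_phi(1) <- the call's raw memory projection
//   mem_phi(2) <- the incoming memory state
//
// so downstream users see a single control and raw-memory merge point.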
2413
2414 //------------------------------expand_unlock_node----------------------
2415 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
2416
2417 Node* ctrl = unlock->in(TypeFunc::Control);
2418 Node* mem = unlock->in(TypeFunc::Memory);
2419 Node* obj = unlock->obj_node();
2420 Node* box = unlock->box_node();
2421
2422 assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2423
2424 // No need for a null check on unlock
2425
2426 // Make the merge point
2427 Node *region;
2428 Node *mem_phi;
2429
2430 region = new RegionNode(3);
2431 // create a Phi for the memory state
2432 mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2433
2434 FastUnlockNode *funlock = new FastUnlockNode( ctrl, obj, box );
2435 funlock = transform_later( funlock )->as_FastUnlock();
2436 // Optimize test; set region slot 2
2437 Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
2438 Node *thread = transform_later(new ThreadLocalNode());
2439
2440 CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
2441 CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2442 "complete_monitor_unlocking_C", slow_path, obj, box, thread);
2443
2444 _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2445 assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
2446 _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
2447
2448 // No exceptions for unlocking
2449 // Capture slow path
2450 // disconnect fall-through projection from call and create a new one
2451 // hook up users of fall-through projection to region
2452 Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
2453 transform_later(slow_ctrl);
2454 _igvn.hash_delete(_callprojs->fallthrough_proj);
2455 _callprojs->fallthrough_proj->disconnect_inputs(C);
2456 region->init_req(1, slow_ctrl);
2457 // region inputs are now complete
2458 transform_later(region);
2459 _igvn.replace_node(_callprojs->fallthrough_proj, region);
2460
2461 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2462 mem_phi->init_req(1, memproj );
2463 mem_phi->init_req(2, mem);
2464 transform_later(mem_phi);
2465
2466 _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
2467 }
2468
2469 // An inline type might be returned from the call, but we don't know its
2470 // type. Either we get a buffered inline type (and nothing needs to be done),
2471 // or one of the values being returned is the klass of the inline type,
2472 // in which case we need to allocate an inline type instance of that type
2473 // and initialize it with the other values being returned. In that case we
2474 // first try a fast-path allocation and initialize the value with the
2475 // inline klass's pack handler, falling back to a runtime call if needed.
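//
// The decoding below assumes a tagged return word: if bit 0 is set, the word
// is (klass | 1) and a buffer must be allocated (the AndX with -2 recovers
// the klass pointer); otherwise the word is already a buffered oop. The 0x1
// and -2 masks in the code encode exactly this convention.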
2476 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2477 assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
2478 Node* ret = call->proj_out_or_null(TypeFunc::Parms);
2479 if (ret == nullptr) {
2480 return;
2481 }
2482 const TypeFunc* tf = call->_tf;
2483 const TypeTuple* domain = OptoRuntime::store_inline_type_fields_Type()->domain_cc();
2484 const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2485 call->_tf = new_tf;
2486 // Make sure the change of type is applied before projections are processed by igvn
2487 _igvn.set_type(call, call->Value(&_igvn));
2488 _igvn.set_type(ret, ret->Value(&_igvn));
2489
2490 // Before any new projection is added:
2491 CallProjections* projs = call->extract_projections(true, true);
2492
2493 // Create temporary hook nodes that will be replaced below.
2494 // Add an input to prevent hook nodes from being dead.
2495 Node* ctl = new Node(call);
2496 Node* mem = new Node(ctl);
2497 Node* io = new Node(ctl);
2498 Node* ex_ctl = new Node(ctl);
2499 Node* ex_mem = new Node(ctl);
2500 Node* ex_io = new Node(ctl);
2501 Node* res = new Node(ctl);
2502
2503 // Allocate a new buffered inline type only if a new one is not returned
2504 Node* cast = transform_later(new CastP2XNode(ctl, res));
2505 Node* mask = MakeConX(0x1);
2506 Node* masked = transform_later(new AndXNode(cast, mask));
2507 Node* cmp = transform_later(new CmpXNode(masked, mask));
2508 Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2509 IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
2510 transform_later(allocation_iff);
2511 Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
2512 Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
2513 Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
2514
2515 // Try to allocate a new buffered inline instance either from TLAB or eden space
2516   Node* needgc_ctrl = nullptr; // needgc means slow case, i.e. allocation failed
2517 CallLeafNoFPNode* handler_call;
2518 const bool alloc_in_place = UseTLAB;
2519 if (alloc_in_place) {
2520 Node* fast_oop_ctrl = nullptr;
2521 Node* fast_oop_rawmem = nullptr;
2522 Node* mask2 = MakeConX(-2);
2523 Node* masked2 = transform_later(new AndXNode(cast, mask2));
2524 Node* rawklassptr = transform_later(new CastX2PNode(masked2));
2525 Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeInstKlassPtr::OBJECT_OR_NULL));
2526 Node* layout_val = make_load(nullptr, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
2527 Node* size_in_bytes = ConvI2X(layout_val);
2528 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2529 Node* fast_oop = bs->obj_allocate(this, mem, allocation_ctl, size_in_bytes, io, needgc_ctrl,
2530 fast_oop_ctrl, fast_oop_rawmem,
2531 AllocateInstancePrefetchLines);
2532     // Allocation succeeded: first initialize the buffered inline instance
2533     // header, then initialize its fields with an inline-class-specific handler.
2534 Node* mark_node = makecon(TypeRawPtr::make((address)markWord::inline_type_prototype().value()));
2535 fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2536 fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2537 if (UseCompressedClassPointers) {
2538 fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2539 }
2540 Node* fixed_block = make_load(fast_oop_ctrl, fast_oop_rawmem, klass_node, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2541 Node* pack_handler = make_load(fast_oop_ctrl, fast_oop_rawmem, fixed_block, in_bytes(InlineKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2542 handler_call = new CallLeafNoFPNode(OptoRuntime::pack_inline_type_Type(),
2543 nullptr,
2544 "pack handler",
2545 TypeRawPtr::BOTTOM);
2546 handler_call->init_req(TypeFunc::Control, fast_oop_ctrl);
2547 handler_call->init_req(TypeFunc::Memory, fast_oop_rawmem);
2548 handler_call->init_req(TypeFunc::I_O, top());
2549 handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2550 handler_call->init_req(TypeFunc::ReturnAdr, top());
2551 handler_call->init_req(TypeFunc::Parms, pack_handler);
2552 handler_call->init_req(TypeFunc::Parms+1, fast_oop);
2553 } else {
2554 needgc_ctrl = allocation_ctl;
2555 }
2556
2557 // Allocation failed, fall back to a runtime call
2558 CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_inline_type_fields_Type(),
2559 StubRoutines::store_inline_type_fields_to_buf(),
2560 "store_inline_type_fields",
2561 TypePtr::BOTTOM);
2562 slow_call->init_req(TypeFunc::Control, needgc_ctrl);
2563 slow_call->init_req(TypeFunc::Memory, mem);
2564 slow_call->init_req(TypeFunc::I_O, io);
2565 slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2566 slow_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
2567 slow_call->init_req(TypeFunc::Parms, res);
2568
2569 Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control));
2570 Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory));
2571 Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O));
2572 Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms));
2573 Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2));
2574 Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci));
2575 Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci));
2576
2577 Node* ex_r = new RegionNode(3);
2578 Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM);
2579 Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO);
2580 ex_r->init_req(1, slow_excp);
2581 ex_mem_phi->init_req(1, slow_mem);
2582 ex_io_phi->init_req(1, slow_io);
2583 ex_r->init_req(2, ex_ctl);
2584 ex_mem_phi->init_req(2, ex_mem);
2585 ex_io_phi->init_req(2, ex_io);
2586 transform_later(ex_r);
2587 transform_later(ex_mem_phi);
2588 transform_later(ex_io_phi);
2589
2590 // We don't know how many values are returned. This assumes the
2591 // worst case, that all available registers are used.
2592 for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2593 if (domain->field_at(i) == Type::HALF) {
2594 slow_call->init_req(i, top());
2595 if (alloc_in_place) {
2596 handler_call->init_req(i+1, top());
2597 }
2598 continue;
2599 }
2600 Node* proj = transform_later(new ProjNode(call, i));
2601 slow_call->init_req(i, proj);
2602 if (alloc_in_place) {
2603 handler_call->init_req(i+1, proj);
2604 }
2605 }
2606   // We can safepoint at this new call.
2607 slow_call->copy_call_debug_info(&_igvn, call);
2608 transform_later(slow_call);
2609 if (alloc_in_place) {
2610 transform_later(handler_call);
2611 }
2612
2613 Node* fast_ctl = nullptr;
2614 Node* fast_res = nullptr;
2615 MergeMemNode* fast_mem = nullptr;
2616 if (alloc_in_place) {
2617 fast_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
2618 Node* rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
2619 fast_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
2620 fast_mem = MergeMemNode::make(mem);
2621 fast_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
2622 transform_later(fast_mem);
2623 }
2624
2625 Node* r = new RegionNode(alloc_in_place ? 4 : 3);
2626 Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2627 Node* io_phi = new PhiNode(r, Type::ABIO);
2628 Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
2629 r->init_req(1, no_allocation_ctl);
2630 mem_phi->init_req(1, mem);
2631 io_phi->init_req(1, io);
2632 res_phi->init_req(1, no_allocation_res);
2633 r->init_req(2, slow_norm);
2634 mem_phi->init_req(2, slow_mem);
2635 io_phi->init_req(2, slow_io);
2636 res_phi->init_req(2, slow_res);
2637 if (alloc_in_place) {
2638 r->init_req(3, fast_ctl);
2639 mem_phi->init_req(3, fast_mem);
2640 io_phi->init_req(3, io);
2641 res_phi->init_req(3, fast_res);
2642 }
2643 transform_later(r);
2644 transform_later(mem_phi);
2645 transform_later(io_phi);
2646 transform_later(res_phi);
2647
2648 // Do not let stores that initialize this buffer be reordered with a subsequent
2649 // store that would make this buffer accessible by other threads.
2650 MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
2651 transform_later(mb);
2652 mb->init_req(TypeFunc::Memory, mem_phi);
2653 mb->init_req(TypeFunc::Control, r);
2654 r = new ProjNode(mb, TypeFunc::Control);
2655 transform_later(r);
2656 mem_phi = new ProjNode(mb, TypeFunc::Memory);
2657 transform_later(mem_phi);
2658
2659 assert(projs->nb_resproj == 1, "unexpected number of results");
2660 _igvn.replace_in_uses(projs->fallthrough_catchproj, r);
2661 _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi);
2662 _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi);
2663 _igvn.replace_in_uses(projs->resproj[0], res_phi);
2664 _igvn.replace_in_uses(projs->catchall_catchproj, ex_r);
2665 _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi);
2666 _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi);
2667 // The CatchNode should not use the ex_io_phi. Re-connect it to the catchall_ioproj.
2668 Node* cn = projs->fallthrough_catchproj->in(0);
2669 _igvn.replace_input_of(cn, 1, projs->catchall_ioproj);
2670
2671 _igvn.replace_node(ctl, projs->fallthrough_catchproj);
2672 _igvn.replace_node(mem, projs->fallthrough_memproj);
2673 _igvn.replace_node(io, projs->fallthrough_ioproj);
2674 _igvn.replace_node(res, projs->resproj[0]);
2675 _igvn.replace_node(ex_ctl, projs->catchall_catchproj);
2676 _igvn.replace_node(ex_mem, projs->catchall_memproj);
2677 _igvn.replace_node(ex_io, projs->catchall_ioproj);
2678 }
2679
2680 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
2681 assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
2682 Node* bol = check->unique_out();
2683 Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
2684 Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
2685 assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
2686
2687 for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
2688 Node* iff = bol->last_out(i);
2689 assert(iff->is_If(), "where's the if?");
2690
2691 if (iff->in(0)->is_top()) {
2692 _igvn.replace_input_of(iff, 1, C->top());
2693 continue;
2694 }
2695
2696 Node* iftrue = iff->as_If()->proj_out(1);
2697 Node* iffalse = iff->as_If()->proj_out(0);
2698 Node* ctrl = iff->in(0);
2699
2700 Node* subklass = nullptr;
2701 if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
2702 subklass = obj_or_subklass;
2703 } else {
2704 Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
2705 subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
2706 }
2707
2708 Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
2709
2710 _igvn.replace_input_of(iff, 0, C->top());
2711 _igvn.replace_node(iftrue, not_subtype_ctrl);
2712 _igvn.replace_node(iffalse, ctrl);
2713 }
2714 _igvn.replace_node(check, C->top());
2715 }
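
// Net effect of the rewiring above, per If user of the shared Bool
// (a summary sketch of the statements in the loop):
//
//   before: If(SubTypeCheck ne) -> iftrue (fail) / iffalse (success)
//   after:  iftrue is replaced by not_subtype_ctrl (the failure path of
//           gen_subtype_check), iffalse by the updated ctrl (success path),
//           and the If itself dies once its control is set to top.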
2716
2717 // FlatArrayCheckNode (array1 array2 ...) is expanded into:
2718 //
2719 // long mark = array1.mark | array2.mark | ...;
2720 // long locked_bit = markWord::unlocked_value & array1.mark & array2.mark & ...;
2721 // if (locked_bit == 0) {
2722 // // One array is locked, load prototype header from the klass
2723 // mark = array1.klass.proto | array2.klass.proto | ...
2724 // }
2725 // if ((mark & markWord::flat_array_bit_in_place) == 0) {
2726 // ...
2727 // }
2728 void PhaseMacroExpand::expand_flatarraycheck_node(FlatArrayCheckNode* check) {
2729 bool array_inputs = _igvn.type(check->in(FlatArrayCheckNode::ArrayOrKlass))->isa_oopptr() != nullptr;
2730 if (array_inputs) {
2731 Node* mark = MakeConX(0);
2732 Node* locked_bit = MakeConX(markWord::unlocked_value);
2733 Node* mem = check->in(FlatArrayCheckNode::Memory);
2734 for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
2735 Node* ary = check->in(i);
2736 const TypeOopPtr* t = _igvn.type(ary)->isa_oopptr();
2737 assert(t != nullptr, "Mixing array and klass inputs");
2738 assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
2739 Node* mark_adr = basic_plus_adr(ary, oopDesc::mark_offset_in_bytes());
2740 Node* mark_load = _igvn.transform(LoadNode::make(_igvn, nullptr, mem, mark_adr, mark_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
2741 mark = _igvn.transform(new OrXNode(mark, mark_load));
2742 locked_bit = _igvn.transform(new AndXNode(locked_bit, mark_load));
2743 }
2744 assert(!mark->is_Con(), "Should have been optimized out");
2745 Node* cmp = _igvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
2746 Node* is_unlocked = _igvn.transform(new BoolNode(cmp, BoolTest::ne));
2747
2748     // The BoolNode might be shared; replace each If user.
2749 Node* old_bol = check->unique_out();
2750 assert(old_bol->is_Bool() && old_bol->as_Bool()->_test._test == BoolTest::ne, "unexpected condition");
2751 for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
2752 IfNode* old_iff = old_bol->last_out(i)->as_If();
2753 Node* ctrl = old_iff->in(0);
2754 RegionNode* region = new RegionNode(3);
2755 Node* mark_phi = new PhiNode(region, TypeX_X);
2756
2757 // Check if array is unlocked
2758 IfNode* iff = _igvn.transform(new IfNode(ctrl, is_unlocked, PROB_MAX, COUNT_UNKNOWN))->as_If();
2759
2760 // Unlocked: Use bits from mark word
2761 region->init_req(1, _igvn.transform(new IfTrueNode(iff)));
2762 mark_phi->init_req(1, mark);
2763
2764 // Locked: Load prototype header from klass
2765 ctrl = _igvn.transform(new IfFalseNode(iff));
2766 Node* proto = MakeConX(0);
2767 for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
2768 Node* ary = check->in(i);
2769 // Make loads control dependent to make sure they are only executed if array is locked
2770 Node* klass_adr = basic_plus_adr(ary, oopDesc::klass_offset_in_bytes());
2771 Node* klass = _igvn.transform(LoadKlassNode::make(_igvn, ctrl, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
2772 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
2773 Node* proto_load = _igvn.transform(LoadNode::make(_igvn, ctrl, C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
2774 proto = _igvn.transform(new OrXNode(proto, proto_load));
2775 }
2776 region->init_req(2, ctrl);
2777 mark_phi->init_req(2, proto);
2778
2779 // Check if flat array bits are set
2780 Node* mask = MakeConX(markWord::flat_array_bit_in_place);
2781 Node* masked = _igvn.transform(new AndXNode(_igvn.transform(mark_phi), mask));
2782 cmp = _igvn.transform(new CmpXNode(masked, MakeConX(0)));
2783 Node* is_not_flat = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
2784
2785 ctrl = _igvn.transform(region);
2786 iff = _igvn.transform(new IfNode(ctrl, is_not_flat, PROB_MAX, COUNT_UNKNOWN))->as_If();
2787 _igvn.replace_node(old_iff, iff);
2788 }
2789 _igvn.replace_node(check, C->top());
2790 } else {
2791 // Fall back to layout helper check
2792 Node* lhs = intcon(0);
2793 for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
2794 Node* array_or_klass = check->in(i);
2795 Node* klass = nullptr;
2796 const TypePtr* t = _igvn.type(array_or_klass)->is_ptr();
2797 assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
2798 if (t->isa_oopptr() != nullptr) {
2799 Node* klass_adr = basic_plus_adr(array_or_klass, oopDesc::klass_offset_in_bytes());
2800 klass = transform_later(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
2801 } else {
2802 assert(t->isa_klassptr(), "Unexpected input type");
2803 klass = array_or_klass;
2804 }
2805 Node* lh_addr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
2806 Node* lh_val = _igvn.transform(LoadNode::make(_igvn, nullptr, C->immutable_memory(), lh_addr, lh_addr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
2807 lhs = _igvn.transform(new OrINode(lhs, lh_val));
2808 }
2809 Node* masked = transform_later(new AndINode(lhs, intcon(Klass::_lh_array_tag_flat_value_bit_inplace)));
2810 Node* cmp = transform_later(new CmpINode(masked, intcon(0)));
2811 Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2812 Node* m2b = transform_later(new Conv2BNode(masked));
2813 // The matcher expects the input to If nodes to be produced by a Bool(CmpI..)
2814 // pattern, but the input to other potential users (e.g. Phi) to be some
2815 // other pattern (e.g. a Conv2B node, possibly idealized as a CMoveI).
2816 Node* old_bol = check->unique_out();
2817 for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
2818 Node* user = old_bol->last_out(i);
2819 for (uint j = 0; j < user->req(); j++) {
2820 Node* n = user->in(j);
2821 if (n == old_bol) {
2822 _igvn.replace_input_of(user, j, user->is_If() ? bol : m2b);
2823 }
2824 }
2825 }
2826 _igvn.replace_node(check, C->top());
2827 }
2828 }
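
// In the layout-helper fallback above, the Or of all layout-helper values is
// tested against Klass::_lh_array_tag_flat_value_bit_inplace: a zero result
// means no input is a flat array. Sketch of the equivalent scalar check:
//
//   bool any_flat = ((lh1 | lh2 | ...) & Klass::_lh_array_tag_flat_value_bit_inplace) != 0;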
2829
2830 //---------------------------eliminate_macro_nodes----------------------
2831 // Eliminate scalar replaced allocations and associated locks.
2832 void PhaseMacroExpand::eliminate_macro_nodes() {
2833 if (C->macro_count() == 0)
2834 return;
2835 NOT_PRODUCT(int membar_before = count_MemBar(C);)
2836
2837   // Before elimination, we may re-mark (change to Nested or NonEscObj)
2838   // all associated (same box and obj) lock and unlock nodes.
2839 int cnt = C->macro_count();
2840 for (int i=0; i < cnt; i++) {
2841 Node *n = C->macro_node(i);
2842 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2843 mark_eliminated_locking_nodes(n->as_AbstractLock());
2844 }
2845 }
2846 // Re-marking may break consistency of Coarsened locks.
2847 if (!C->coarsened_locks_consistent()) {
2848 return; // recompile without Coarsened locks if broken
2849 }
2870 }
2871 // Next, attempt to eliminate allocations
2872 _has_locks = false;
2873 progress = true;
2874 while (progress) {
2875 progress = false;
2876 for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
2877 Node* n = C->macro_node(i - 1);
2878 bool success = false;
2879 DEBUG_ONLY(int old_macro_count = C->macro_count();)
2880 switch (n->class_id()) {
2881 case Node::Class_Allocate:
2882 case Node::Class_AllocateArray:
2883 success = eliminate_allocate_node(n->as_Allocate());
2884 #ifndef PRODUCT
2885 if (success && PrintOptoStatistics) {
2886 Atomic::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
2887 }
2888 #endif
2889 break;
2890 case Node::Class_CallStaticJava: {
2891 CallStaticJavaNode* call = n->as_CallStaticJava();
2892 if (!call->method()->is_method_handle_intrinsic()) {
2893 success = eliminate_boxing_node(n->as_CallStaticJava());
2894 }
2895 break;
2896 }
2897 case Node::Class_Lock:
2898 case Node::Class_Unlock:
2899 assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2900 _has_locks = true;
2901 break;
2902 case Node::Class_ArrayCopy:
2903 break;
2904 case Node::Class_OuterStripMinedLoop:
2905 break;
2906 case Node::Class_SubTypeCheck:
2907 break;
2908 case Node::Class_Opaque1:
2909 break;
2910 case Node::Class_FlatArrayCheck:
2911 break;
2912 default:
2913 assert(n->Opcode() == Op_LoopLimit ||
2914 n->Opcode() == Op_Opaque3 ||
2915 n->Opcode() == Op_Opaque4 ||
2916 n->Opcode() == Op_MaxL ||
2917 n->Opcode() == Op_MinL ||
2918 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2919 "unknown node type in macro list");
2920 }
2921 assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2922 progress = progress || success;
2923 }
2924 }
2925 #ifndef PRODUCT
2926 if (PrintOptoStatistics) {
2927 int membar_after = count_MemBar(C);
2928 Atomic::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
2929 }
2930 #endif
2931 }
2937 C->shuffle_macro_nodes();
2938 }
2939 // Last attempt to eliminate macro nodes.
2940 eliminate_macro_nodes();
2941 if (C->failing()) return true;
2942
2943 // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2944 bool progress = true;
2945 while (progress) {
2946 progress = false;
2947 for (int i = C->macro_count(); i > 0; i--) {
2948 Node* n = C->macro_node(i-1);
2949 bool success = false;
2950 DEBUG_ONLY(int old_macro_count = C->macro_count();)
2951 if (n->Opcode() == Op_LoopLimit) {
2952 // Remove it from macro list and put on IGVN worklist to optimize.
2953 C->remove_macro_node(n);
2954 _igvn._worklist.push(n);
2955 success = true;
2956 } else if (n->Opcode() == Op_CallStaticJava) {
2957 CallStaticJavaNode* call = n->as_CallStaticJava();
2958 if (!call->method()->is_method_handle_intrinsic()) {
2959 // Remove it from macro list and put on IGVN worklist to optimize.
2960 C->remove_macro_node(n);
2961 _igvn._worklist.push(n);
2962 success = true;
2963 }
2964 } else if (n->is_Opaque1()) {
2965 _igvn.replace_node(n, n->in(1));
2966 success = true;
2967 #if INCLUDE_RTM_OPT
2968 } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
2969 assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
2970 assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
2971 Node* cmp = n->unique_out();
2972 #ifdef ASSERT
2973 // Validate graph.
2974 assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
2975 BoolNode* bol = cmp->unique_out()->as_Bool();
2976 assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
2977 (bol->_test._test == BoolTest::ne), "");
2978 IfNode* ifn = bol->unique_out()->as_If();
2979 assert((ifn->outcnt() == 2) &&
2980 ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != nullptr, "");
2981 #endif
2982 Node* repl = n->in(1);
2983 if (!_has_locks) {
3061 // Worst case is a macro node gets expanded into about 200 nodes.
3062 // Allow 50% more for optimization.
3063 if (C->check_node_count(300, "out of nodes before macro expansion")) {
3064 return true;
3065 }
3066
3067 DEBUG_ONLY(int old_macro_count = C->macro_count();)
3068 switch (n->class_id()) {
3069 case Node::Class_Lock:
3070 expand_lock_node(n->as_Lock());
3071 break;
3072 case Node::Class_Unlock:
3073 expand_unlock_node(n->as_Unlock());
3074 break;
3075 case Node::Class_ArrayCopy:
3076 expand_arraycopy_node(n->as_ArrayCopy());
3077 break;
3078 case Node::Class_SubTypeCheck:
3079 expand_subtypecheck_node(n->as_SubTypeCheck());
3080 break;
3081 case Node::Class_CallStaticJava:
3082 expand_mh_intrinsic_return(n->as_CallStaticJava());
3083 C->remove_macro_node(n);
3084 break;
3085 case Node::Class_FlatArrayCheck:
3086 expand_flatarraycheck_node(n->as_FlatArrayCheck());
3087 break;
3088 default:
3089 assert(false, "unknown node type in macro list");
3090 }
3091 assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
3092 if (C->failing()) return true;
3093 C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
3094
3095 // Clean up the graph so we're less likely to hit the maximum node
3096 // limit
3097 _igvn.set_delay_transform(false);
3098 _igvn.optimize();
3099 if (C->failing()) return true;
3100 _igvn.set_delay_transform(true);
3101 }
3102
3103   // All nodes except Allocate nodes are expanded now. There could be
3104   // new optimization opportunities (such as folding a newly created
3105   // load from a just-allocated object). Run IGVN.
3106
3107 // expand "macro" nodes