
src/hotspot/share/opto/graphKit.cpp

Old version:
2571     set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2572   }
2573   return call;
2574 
2575 }
2576 
2577 // i2b
2578 Node* GraphKit::sign_extend_byte(Node* in) {
2579   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2580   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2581 }
2582 
2583 // i2s
2584 Node* GraphKit::sign_extend_short(Node* in) {
2585   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2586   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2587 }
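
The two helpers above implement the JVM's i2b/i2s narrowing conversions with a left shift that moves the byte/short sign bit into bit 31, followed by an arithmetic right shift that smears it back out. A standalone sketch of the same trick on plain int32_t (not HotSpot code; it assumes two's complement and an arithmetic right shift, which is what the C2 shift nodes rely on here):

    #include <cassert>
    #include <cstdint>

    // Same idea as GraphKit::sign_extend_byte/sign_extend_short, on plain ints.
    // The left shift is done on the unsigned representation to avoid signed-
    // overflow pitfalls; the right shift is arithmetic on mainstream compilers.
    static int32_t sign_extend_byte(int32_t in)  { return (int32_t)((uint32_t)in << 24) >> 24; }
    static int32_t sign_extend_short(int32_t in) { return (int32_t)((uint32_t)in << 16) >> 16; }

    int main() {
      assert(sign_extend_byte(0xFF) == -1);          // same as (int32_t)(int8_t)0xFF
      assert(sign_extend_short(0x8000) == -32768);   // same as (int32_t)(int16_t)0x8000
      return 0;
    }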
2588 
2589 //-----------------------------make_native_call-------------------------------
2590 Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
2591   // Select just the actual call args to pass on
2592   // [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]
2593   //                                             |          |
2594   //                                             V          V
2595   //                                             [ ... args ]
2596   uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;
2597   ResourceMark rm;
2598   Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2599   const Type** arg_types = TypeTuple::fields(n_filtered_args);
2600   GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());
2601 
2602   VMReg* argRegs = nep->argMoves();
2603   {
2604     for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
2605         vm_arg_pos < n_filtered_args; vm_arg_pos++) {
2606       uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
2607       Node* node = argument(vm_unfiltered_arg_pos);
2608       const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
2609       VMReg reg = type == Type::HALF
2610         ? VMRegImpl::Bad()
2611         : argRegs[java_arg_read_pos++];
2612 
2613       argument_nodes[vm_arg_pos] = node;
2614       arg_types[TypeFunc::Parms + vm_arg_pos] = type;
2615       arg_regs.at_put(vm_arg_pos, reg);
2616     }
2617   }
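
In the loop above, a long or double occupies two incoming argument slots, the second one typed Type::HALF; only the non-HALF slot consumes the next VMReg from nep->argMoves(), while the HALF slot is recorded as VMRegImpl::Bad(). A minimal standalone sketch of that pairing, with plain enums and ints standing in for Type and VMReg (hypothetical values, not HotSpot code):

    #include <cstdio>
    #include <vector>

    enum SlotType { INT_SLOT, LONG_SLOT, HALF_SLOT };   // HALF = second slot of a long/double
    const int BAD_REG = -1;                             // stand-in for VMRegImpl::Bad()

    int main() {
      // Filtered argument slots for a hypothetical (long, int) native signature.
      std::vector<SlotType> slots    = { LONG_SLOT, HALF_SLOT, INT_SLOT };
      std::vector<int>      argMoves = { 10, 11 };      // one register per value, not per slot
      std::vector<int>      arg_regs(slots.size(), BAD_REG);

      for (size_t pos = 0, read = 0; pos < slots.size(); pos++) {
        arg_regs[pos] = (slots[pos] == HALF_SLOT) ? BAD_REG : argMoves[read++];
      }
      for (int r : arg_regs) std::printf("%d ", r);     // prints: 10 -1 11
      std::printf("\n");
      return 0;
    }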
2618 
2619   uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;
2620   GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
2621   const Type** ret_types = TypeTuple::fields(n_returns);
2622 
2623   VMReg* retRegs = nep->returnMoves();
2624   {
2625     for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
2626         vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
2627       const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);
2628       VMReg reg = type == Type::HALF
2629         ? VMRegImpl::Bad()
2630         : retRegs[java_ret_read_pos++];
2631 
2632       ret_regs.at_put(vm_ret_pos, reg);
2633       ret_types[TypeFunc::Parms + vm_ret_pos] = type;
2634     }
2635   }
2636 
2637   const TypeFunc* new_call_type = TypeFunc::make(
2638     TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
2639     TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
2640   );
2641 
2642   if (nep->need_transition()) {
2643     RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
2644                                                               nep->shadow_space(),
2645                                                               arg_regs, ret_regs);
2646     if (invoker == NULL) {
2647       C->record_failure("native invoker not implemented on this platform");
2648       return NULL;
2649     }
2650     C->add_native_invoker(invoker);
2651     call_addr = invoker->code_begin();
2652   }
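
When the entry point needs a thread-state transition, the block above replaces the raw call_addr with a generated invoker stub that performs the transition around the actual call, or records a compilation failure if the platform has no such stub. The wrapping idea, sketched as ordinary C++ with a hypothetical thread-state flag (illustrative only; this is not what SharedRuntime::make_native_invoker actually emits):

    #include <cstdio>

    enum class ThreadState { in_java, in_native };
    static ThreadState g_state = ThreadState::in_java;

    static int raw_native_target(int x) { return x * 2; }  // stands in for the original call_addr

    // Stand-in for the generated invoker: transition out, call, transition back.
    static int invoker(int x) {
      g_state = ThreadState::in_native;
      int result = raw_native_target(x);
      g_state = ThreadState::in_java;
      return result;
    }

    int main() {
      int (*call_addr)(int) = &invoker;   // the call site now targets the wrapper
      std::printf("%d\n", call_addr(21)); // prints 42
      return 0;
    }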
2653   assert(call_addr != NULL, "sanity");
2654 
2655   CallNativeNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM,
2656                                             arg_regs,
2657                                             ret_regs,
2658                                             nep->shadow_space(),
2659                                             nep->need_transition());
2660 
2661   if (call->_need_transition) {
2662     add_safepoint_edges(call);
2663   }
2664 
2665   set_predefined_input_for_runtime_call(call);
2666 
2667   for (uint i = 0; i < n_filtered_args; i++) {
2668     call->init_req(i + TypeFunc::Parms, argument_nodes[i]);
2669   }
2670 
2671   Node* c = gvn().transform(call);
2672   assert(c == call, "cannot disappear");
2673 
2674   set_predefined_output_for_runtime_call(call);
2675 
2676   Node* ret;
2677   if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {
2678     ret = top();
2679   } else {
2680     ret =  gvn().transform(new ProjNode(call, TypeFunc::Parms));
2681     // Unpack native results if needed
2682     // Need this method type since it's unerased
2683     switch (nep->method_type()->rtype()->basic_type()) {

New version:
2571     set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2572   }
2573   return call;
2574 
2575 }
2576 
2577 // i2b
2578 Node* GraphKit::sign_extend_byte(Node* in) {
2579   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2580   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2581 }
2582 
2583 // i2s
2584 Node* GraphKit::sign_extend_short(Node* in) {
2585   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2586   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2587 }
2588 
2589 //-----------------------------make_native_call-------------------------------
2590 Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
2591   assert(!nep->need_transition(), "only trivial calls");
2592 
2593   // Select just the actual call args to pass on
2594   // [long addr, HALF addr, ... args , NativeEntryPoint nep]
2595   //                      |          |
2596   //                      V          V
2597   //                      [ ... args ]
2598   uint n_filtered_args = nargs - 3; // -addr (2), -nep;
2599   ResourceMark rm;
2600   Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2601   const Type** arg_types = TypeTuple::fields(n_filtered_args);
2602   GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());
2603 
2604   VMReg* argRegs = nep->argMoves();
2605   {
2606     for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
2607         vm_arg_pos < n_filtered_args; vm_arg_pos++) {
2608       uint vm_unfiltered_arg_pos = vm_arg_pos + 2; // +2 to skip addr (2 since long)
2609       Node* node = argument(vm_unfiltered_arg_pos);
2610       const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
2611       VMReg reg = type == Type::HALF
2612         ? VMRegImpl::Bad()
2613         : argRegs[java_arg_read_pos++];
2614 
2615       argument_nodes[vm_arg_pos] = node;
2616       arg_types[TypeFunc::Parms + vm_arg_pos] = type;
2617       arg_regs.at_put(vm_arg_pos, reg);
2618     }
2619   }
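
In the new version the incoming argument list no longer carries the fallback MethodHandle, so a filtered position maps to unfiltered position + 2 (skipping only the two-slot address), and n_filtered_args drops three slots instead of four. A small worked sketch of that index arithmetic with hypothetical argument names (plain C++, not HotSpot code):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // Incoming Java arguments: [long addr, HALF addr, ... args ..., NativeEntryPoint nep]
      std::vector<std::string> java_args = { "addr", "addr:HALF", "x", "y", "nep" };
      unsigned nargs = java_args.size();        // 5
      unsigned n_filtered_args = nargs - 3;     // -addr (2), -nep  => 2

      std::vector<std::string> filtered;
      for (unsigned vm_arg_pos = 0; vm_arg_pos < n_filtered_args; vm_arg_pos++) {
        filtered.push_back(java_args[vm_arg_pos + 2]);  // +2 skips the two addr slots
      }
      assert((filtered == std::vector<std::string>{"x", "y"}));
      return 0;
    }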
2620 
2621   uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;
2622   GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
2623   const Type** ret_types = TypeTuple::fields(n_returns);
2624 
2625   VMReg* retRegs = nep->returnMoves();
2626   {
2627     for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
2628         vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
2629       const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);
2630       VMReg reg = type == Type::HALF
2631         ? VMRegImpl::Bad()
2632         : retRegs[java_ret_read_pos++];
2633 
2634       ret_regs.at_put(vm_ret_pos, reg);
2635       ret_types[TypeFunc::Parms + vm_ret_pos] = type;
2636     }
2637   }
2638 
2639   const TypeFunc* new_call_type = TypeFunc::make(
2640     TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
2641     TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
2642   );
2643 
2644   CallNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM,
2645                             arg_regs,
2646                             ret_regs,
2647                             nep->shadow_space());
2648 
2649   assert(call != nullptr, "'call' was not set");
2650 
2651   set_predefined_input_for_runtime_call(call);
2652 
2653   for (uint i = 0; i < n_filtered_args; i++) {
2654     call->init_req(i + TypeFunc::Parms, argument_nodes[i]);
2655   }
2656 
2657   Node* c = gvn().transform(call);
2658   assert(c == call, "cannot disappear");
2659 
2660   set_predefined_output_for_runtime_call(call);
2661 
2662   Node* ret;
2663   if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {
2664     ret = top();
2665   } else {
2666     ret =  gvn().transform(new ProjNode(call, TypeFunc::Parms));
2667     // Unpack native results if needed
2668     // Need this method type since it's unerased
2669     switch (nep->method_type()->rtype()->basic_type()) {