< prev index next >

src/share/vm/c1/c1_LinearScan.cpp

Print this page




  30 #include "c1/c1_IR.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_LinearScan.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #ifdef TARGET_ARCH_x86
  36 # include "vmreg_x86.inline.hpp"
  37 #endif
  38 #ifdef TARGET_ARCH_sparc
  39 # include "vmreg_sparc.inline.hpp"
  40 #endif
  41 #ifdef TARGET_ARCH_zero
  42 # include "vmreg_zero.inline.hpp"
  43 #endif
  44 #ifdef TARGET_ARCH_arm
  45 # include "vmreg_arm.inline.hpp"
  46 #endif
  47 #ifdef TARGET_ARCH_ppc
  48 # include "vmreg_ppc.inline.hpp"
  49 #endif




  50 
  51 
  52 #ifndef PRODUCT
  53 
  54   static LinearScanStatistic _stat_before_alloc;
  55   static LinearScanStatistic _stat_after_asign;
  56   static LinearScanStatistic _stat_final;
  57 
  58   static LinearScanTimers _total_timer;
  59 
  60   // helper macro for short definition of timer
  61   #define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);
  62 
  63   // helper macro for short definition of trace-output inside code
  64   #define TRACE_LINEAR_SCAN(level, code)       \
  65     if (TraceLinearScanLevel >= level) {       \
  66       code;                                    \
  67     }
  68 
  69 #else


 176     return -1;
 177   }
 178 }
 179 
 180 
 181 // ********** functions for classification of intervals
 182 
 // Classification: true if this interval is bound to a fixed physical register
 // (physical register numbers are below nof_regs; virtual ones start at vreg_base).
 183 bool LinearScan::is_precolored_interval(const Interval* i) {
 184   return i->reg_num() < LinearScan::nof_regs;
 185 }
 186 
 // Classification: true if this interval represents a virtual register
 // (all virtual register numbers are >= LIR_OprDesc::vreg_base).
 187 bool LinearScan::is_virtual_interval(const Interval* i) {
 188   return i->reg_num() >= LIR_OprDesc::vreg_base;
 189 }
 190 
 // Classification: true if this interval is fixed to a physical CPU (integer)
 // register; CPU registers occupy the number range [0, nof_cpu_regs).
 191 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
 192   return i->reg_num() < LinearScan::nof_cpu_regs;
 193 }
 194 
 // Classification: true if this virtual interval must be allocated a CPU register.
 195 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
 196 #if defined(__SOFTFP__) || defined(E500V2)
 // Software floating point (or E500V2): floats/doubles also live in CPU registers.
 197   return i->reg_num() >= LIR_OprDesc::vreg_base;
 198 #else
 // Hardware FP: floats/doubles go to the FPU, so exclude them here.
 199   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
 200 #endif // __SOFTFP__ or E500V2
 201 }
 202 
 // Classification: true if this interval is fixed to a physical FPU register;
 // FPU registers occupy the number range [nof_cpu_regs, nof_regs).
 203 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
 204   return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
 205 }
 206 
 // Classification: true if this virtual interval must be allocated an FPU register
 // (i.e. it holds a T_FLOAT or T_DOUBLE value and the target has hardware FP).
 207 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
 208 #if defined(__SOFTFP__) || defined(E500V2)
 // Software floating point: no interval ever needs an FPU register.
 209   return false;
 210 #else
 211   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
 212 #endif // __SOFTFP__ or E500V2
 213 }
 214 
 // True if a virtual interval ended up assigned to an FPU register
 // (used to select the intervals that take part in FPU stack allocation).
 215 bool LinearScan::is_in_fpu_register(const Interval* i) {
 216   // fixed intervals not needed for FPU stack allocation
 217   return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
 218 }
 219 
 // True if a virtual interval holds an object pointer (T_OBJECT).
 220 bool LinearScan::is_oop_interval(const Interval* i) {
 221   // fixed intervals never contain oops
 222   return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
 223 }
 224 
 225 
 226 // ********** General helper functions
 227 
 228 // compute next unused stack index that can be used for spilling
 229 int LinearScan::allocate_spill_slot(bool double_word) {
 230   int spill_slot;
 231   if (double_word) {


2057       case T_OBJECT: {
2058         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2059         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2060         return LIR_OprFact::single_cpu_oop(assigned_reg);
2061       }
2062 
2063       case T_ADDRESS: {
2064         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2065         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2066         return LIR_OprFact::single_cpu_address(assigned_reg);
2067       }
2068 
2069       case T_METADATA: {
2070         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2071         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2072         return LIR_OprFact::single_cpu_metadata(assigned_reg);
2073       }
2074 
2075 #ifdef __SOFTFP__
2076       case T_FLOAT:  // fall through
2077 #endif // __SOFTFP__







2078       case T_INT: {
2079         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2080         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2081         return LIR_OprFact::single_cpu(assigned_reg);
2082       }
2083 
2084 #ifdef __SOFTFP__
2085       case T_DOUBLE:  // fall through
2086 #endif // __SOFTFP__








2087       case T_LONG: {
2088         int assigned_regHi = interval->assigned_regHi();
2089         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2090         assert(num_physical_regs(T_LONG) == 1 ||
2091                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2092 
2093         assert(assigned_reg != assigned_regHi, "invalid allocation");
2094         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2095                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2096         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2097         if (requires_adjacent_regs(T_LONG)) {
2098           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2099         }
2100 
2101 #ifdef _LP64
2102         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2103 #else
2104 #if defined(SPARC) || defined(PPC)
2105         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2106 #else


2121 
2122         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2123         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2124         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2125       }
2126 
2127       case T_DOUBLE: {
2128 #ifdef X86
2129         if (UseSSE >= 2) {
2130           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
2131           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2132           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2133         }
2134 #endif
2135 
2136 #ifdef SPARC
2137         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2138         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2139         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2140         LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2141 #elif defined(ARM32)
2142         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2143         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2144         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2145         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2146 #else
2147         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2148         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2149         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2150 #endif
2151         return result;
2152       }
2153 #endif // __SOFTFP__
2154 
2155       default: {
2156         ShouldNotReachHere();
2157         return LIR_OprFact::illegalOpr;
2158       }
2159     }
2160   }
2161 }


2710       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2711       // the double as float registers in the native ordering. On X86,
2712       // fpu_regnrLo is a FPU stack slot whose VMReg represents
2713       // the low-order word of the double and fpu_regnrLo + 1 is the
2714       // name for the other half.  *first and *second must represent the
2715       // least and most significant words, respectively.
2716 
2717 #ifdef X86
2718       // the exact location of fpu stack values is only known
2719       // during fpu stack allocation, so the stack allocator object
2720       // must be present
2721       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2722       assert(_fpu_stack_allocator != NULL, "must be present");
2723       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2724 
2725       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2726 #endif
2727 #ifdef SPARC
2728       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2729 #endif
2730 #ifdef ARM32
2731       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2732 #endif
2733 #ifdef PPC
2734       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2735 #endif
2736 
2737 #ifdef VM_LITTLE_ENDIAN
2738       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2739 #else
2740       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2741 #endif
2742 
2743 #ifdef _LP64
2744       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2745       second = _int_0_scope_value;
2746 #else
2747       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2748       // %%% This is probably a waste but we'll keep things as they were for now
2749       if (true) {
2750         VMReg rname_second = rname_first->next();
2751         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2752       }




  30 #include "c1/c1_IR.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_LinearScan.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #ifdef TARGET_ARCH_x86
  36 # include "vmreg_x86.inline.hpp"
  37 #endif
  38 #ifdef TARGET_ARCH_sparc
  39 # include "vmreg_sparc.inline.hpp"
  40 #endif
  41 #ifdef TARGET_ARCH_zero
  42 # include "vmreg_zero.inline.hpp"
  43 #endif
  44 #ifdef TARGET_ARCH_arm
  45 # include "vmreg_arm.inline.hpp"
  46 #endif
  47 #ifdef TARGET_ARCH_ppc
  48 # include "vmreg_ppc.inline.hpp"
  49 #endif
  50 #ifdef TARGET_ARCH_aarch32
  51 # include "vmreg_aarch32.inline.hpp"
  52 # include "vm_version_aarch32.hpp"
  53 #endif
  54 
  55 
  56 #ifndef PRODUCT
  57 
  58   static LinearScanStatistic _stat_before_alloc;
  59   static LinearScanStatistic _stat_after_asign;
  60   static LinearScanStatistic _stat_final;
  61 
  62   static LinearScanTimers _total_timer;
  63 
  64   // helper macro for short definition of timer
  65   #define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);
  66 
  67   // helper macro for short definition of trace-output inside code
  68   #define TRACE_LINEAR_SCAN(level, code)       \
  69     if (TraceLinearScanLevel >= level) {       \
  70       code;                                    \
  71     }
  72 
  73 #else


 180     return -1;
 181   }
 182 }
 183 
 184 
 185 // ********** functions for classification of intervals
 186 
 // Classification: true if this interval is bound to a fixed physical register
 // (physical register numbers are below nof_regs; virtual ones start at vreg_base).
 187 bool LinearScan::is_precolored_interval(const Interval* i) {
 188   return i->reg_num() < LinearScan::nof_regs;
 189 }
 190 
 // Classification: true if this interval represents a virtual register
 // (all virtual register numbers are >= LIR_OprDesc::vreg_base).
 191 bool LinearScan::is_virtual_interval(const Interval* i) {
 192   return i->reg_num() >= LIR_OprDesc::vreg_base;
 193 }
 194 
 // Classification: true if this interval is fixed to a physical CPU (integer)
 // register; CPU registers occupy the number range [0, nof_cpu_regs).
 195 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
 196   return i->reg_num() < LinearScan::nof_cpu_regs;
 197 }
 198 
 // Classification: true if this virtual interval must be allocated a CPU register.
 // On AARCH32, FPU availability is a runtime property (hasFPU()), not a compile-time one.
 199 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
 200 #if !defined(AARCH32) && (defined(__SOFTFP__) || defined(E500V2))
 // Software floating point (or E500V2): floats/doubles also live in CPU registers.
 201   return i->reg_num() >= LIR_OprDesc::vreg_base;
 202 #else
 // Floats/doubles go to the FPU, except on AARCH32 hardware without an FPU.
 203   return i->reg_num() >= LIR_OprDesc::vreg_base && (AARCH32_ONLY(!hasFPU() ||) (i->type() != T_FLOAT && i->type() != T_DOUBLE));
 204 #endif // !AARCH32 && (__SOFTFP__ || E500V2)
 205 }
 206 
 // Classification: true if this interval is fixed to a physical FPU register;
 // FPU registers occupy the number range [nof_cpu_regs, nof_regs).
 207 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
 208   return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
 209 }
 210 
 // Classification: true if this virtual interval must be allocated an FPU register
 // (holds T_FLOAT/T_DOUBLE; on AARCH32 additionally requires a runtime FPU, hasFPU()).
 211 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
 212 #if !defined(AARCH32) && (defined(__SOFTFP__) || defined(E500V2))
 // Software floating point: no interval ever needs an FPU register.
 213   return false;
 214 #else
 215   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE) AARCH32_ONLY(&& hasFPU());
 216 #endif // !AARCH32 && (__SOFTFP__ || E500V2)
 217 }
 218 
 // True if a virtual interval ended up assigned to an FPU register
 // (used to select the intervals that take part in FPU stack allocation).
 219 bool LinearScan::is_in_fpu_register(const Interval* i) {
 220   // fixed intervals not needed for FPU stack allocation
 221   return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
 222 }
 223 
 // True if a virtual interval holds an object pointer (T_OBJECT).
 224 bool LinearScan::is_oop_interval(const Interval* i) {
 225   // fixed intervals never contain oops
 226   return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
 227 }
 228 
 229 
 230 // ********** General helper functions
 231 
 232 // compute next unused stack index that can be used for spilling
 233 int LinearScan::allocate_spill_slot(bool double_word) {
 234   int spill_slot;
 235   if (double_word) {


2061       case T_OBJECT: {
2062         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2063         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2064         return LIR_OprFact::single_cpu_oop(assigned_reg);
2065       }
2066 
2067       case T_ADDRESS: {
2068         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2069         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2070         return LIR_OprFact::single_cpu_address(assigned_reg);
2071       }
2072 
2073       case T_METADATA: {
2074         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2075         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2076         return LIR_OprFact::single_cpu_metadata(assigned_reg);
2077       }
2078 
2079 #ifdef __SOFTFP__
2080       case T_FLOAT:  // fall through
2081 #if defined(AARCH32)
2082       if(hasFPU()) {
2083         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2084         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2085         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2086       }
2087 #endif
2088 #endif
2089       case T_INT: {
2090         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2091         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2092         return LIR_OprFact::single_cpu(assigned_reg);
2093       }
2094 
2095 #ifdef __SOFTFP__
2096       case T_DOUBLE:  // fall through
2097 #if defined(AARCH32)
2098         if(hasFPU()) {
2099             assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2100             assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2101             assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2102             return LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2103         }
2104 #endif
2105 #endif
2106       case T_LONG: {
2107         int assigned_regHi = interval->assigned_regHi();
2108         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2109         assert(num_physical_regs(T_LONG) == 1 ||
2110                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2111 
2112         assert(assigned_reg != assigned_regHi, "invalid allocation");
2113         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2114                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2115         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2116         if (requires_adjacent_regs(T_LONG)) {
2117           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2118         }
2119 
2120 #ifdef _LP64
2121         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2122 #else
2123 #if defined(SPARC) || defined(PPC)
2124         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2125 #else


2140 
2141         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2142         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2143         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2144       }
2145 
2146       case T_DOUBLE: {
2147 #ifdef X86
2148         if (UseSSE >= 2) {
2149           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
2150           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2151           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2152         }
2153 #endif
2154 
2155 #ifdef SPARC
2156         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2157         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2158         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2159         LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2160 #elif defined(ARM32) || defined(AARCH32)
2161         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2162         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2163         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2164         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2165 #else
2166         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2167         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2168         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2169 #endif
2170         return result;
2171       }
2172 #endif // __SOFTFP__
2173 
2174       default: {
2175         ShouldNotReachHere();
2176         return LIR_OprFact::illegalOpr;
2177       }
2178     }
2179   }
2180 }


2729       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
2730       // the double as float registers in the native ordering. On X86,
2731       // fpu_regnrLo is a FPU stack slot whose VMReg represents
2732       // the low-order word of the double and fpu_regnrLo + 1 is the
2733       // name for the other half.  *first and *second must represent the
2734       // least and most significant words, respectively.
2735 
2736 #ifdef X86
2737       // the exact location of fpu stack values is only known
2738       // during fpu stack allocation, so the stack allocator object
2739       // must be present
2740       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2741       assert(_fpu_stack_allocator != NULL, "must be present");
2742       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2743 
2744       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2745 #endif
2746 #ifdef SPARC
2747       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2748 #endif
2749 #if defined(ARM32) || defined(AARCH32)
2750       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2751 #endif // ARM32 || AARCH32
2752 #ifdef PPC
2753       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2754 #endif
2755 
2756 #ifdef VM_LITTLE_ENDIAN
2757       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2758 #else
2759       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2760 #endif
2761 
2762 #ifdef _LP64
2763       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2764       second = _int_0_scope_value;
2765 #else
2766       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2767       // %%% This is probably a waste but we'll keep things as they were for now
2768       if (true) {
2769         VMReg rname_second = rname_first->next();
2770         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2771       }


< prev index next >