 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "shenandoahBarrierSetAssembler_x86.hpp"
#endif

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

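// a long occupies two consecutive local slots (n and n+1);
// laddress addresses the long through its second slot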
static inline Address laddress(int n) { return iaddress(n + 1); }

// ... (code elided) ...

                                val != noreg /* tosca_live */,
                                false /* expand_call */);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
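          // G1 post barrier: dirties the card covering the updated location
          // and enqueues it for concurrent refinement when the store creates
          // a cross-region reference.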
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   rcx /* thread */,
                                   rbx /* tmp */,
                                   rsi /* tmp2 */);
        }
        __ restore_bcp();

      }
      break;
    case BarrierSet::ShenandoahBarrierSet:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
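        // The barriers below need the current thread in rcx and use rsi as a
        // temp; rsi doubles as the interpreter's bcp register, so bcp is
        // saved here and restored after the store.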
        __ get_thread(rcx);
        __ save_bcp();
        if (ShenandoahSATBBarrier) {
          __ g1_write_barrier_pre(rdx /* obj */,
                                  rbx /* pre_val */,
                                  rcx /* thread */,
                                  rsi /* tmp */,
                                  val != noreg /* tosca_live */,
                                  false /* expand_call */);
        }

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
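          // Shenandoah storeval barrier: resolves val to its to-space copy
          // before it is published, so that from-space references are never
          // written into the heap (rsi is used as a temp).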
          ShenandoahBarrierSetAssembler::bsasm()->storeval_barrier(_masm, val, rsi);
          __ movl(Address(rdx, 0), val);
        }
        __ restore_bcp();

      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
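        // Card-table barrier: store_check() below marks the card covering the
        // updated address dirty so the next GC rescans it for old-to-young
        // references.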
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:

// ... (code elided) ...

  index_check(rdx, rax); // kills rbx,
  // rax,: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax); // kills rbx,
  // rax,: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax); // kills rbx,
  // rax,: index
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    // Needs GC barriers
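    // (Under Shenandoah the reference read from the array may still point
    // into from-space; load_heap_oop is expected to apply the required load
    // barrier, which the plain movptr below would skip.)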
    __ load_heap_oop(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  } else
#endif
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));

}


void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax); // kills rbx,
  // rax,: index
  // can do better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax); // kills rbx,
  // rax,: index
  // can do better code for P5 - may want to improve this at some point

// ... (code elided) ...

  __ bind(notBool);

  // itos
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);

  __ movl(rax, lo);
  __ push(itos);
  // Rewrite bytecode to be faster
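  // patch_bytecode rewrites the getfield at the current bcp into its
  // type-specialized fast variant, so subsequent executions skip the
  // type dispatch above.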
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
  }
  __ jmp(Done);

  __ bind(notInt);
  // atos
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);

#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    // Needs GC barriers
    __ load_heap_oop(rax, lo);
  } else
#endif
  __ movl(rax, lo);
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
  }
  __ jmp(Done);

  __ bind(notObj);
  // ctos
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);

  __ load_unsigned_short(rax, lo);
  __ push(ctos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
  }
  __ jmp(Done);

  __ bind(notChar);

// ... (code elided) ...

                         Address::times_ptr,
                         in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));


  // rax,: object
  __ verify_oop(rax);
  __ null_check(rax);
  // field addresses
  const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
  const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);

  // access field
  switch (bytecode()) {
    case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo);                 break;
    case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo);      break;
    case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo);    break;
    case Bytecodes::_fast_igetfield: __ movl(rax, lo);                   break;
    case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
    case Bytecodes::_fast_fgetfield: __ fld_s(lo);                       break;
    case Bytecodes::_fast_dgetfield: __ fld_d(lo);                       break;
    case Bytecodes::_fast_agetfield:
#if INCLUDE_ALL_GCS
      if (UseShenandoahGC) {
        // Needs GC barriers
        __ load_heap_oop(rax, lo);
      } else
#endif
      __ movptr(rax, lo);
      __ verify_oop(rax);
      break;
    default:
      ShouldNotReachHere();
  }

  // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
  // volatile_barrier( );
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx, Address(rcx,
                         rdx,
                         Address::times_ptr,
                         in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is next instruction)
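  // (the fast_xaccess templates fuse aload_0 with the following getfield;
  // rsi is temporarily advanced so a NullPointerException is attributed to
  // the getfield bytecode, then restored by the decrement below)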
  __ increment(rsi);
  __ null_check(rax);
  const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
  if (state == itos) {
    __ movl(rax, lo);
  } else if (state == atos) {
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC) {
      // Needs GC barriers
      __ load_heap_oop(rax, lo);
    } else
#endif
    __ movptr(rax, lo);
    __ verify_oop(rax);
  } else if (state == ftos) {
    __ fld_s(lo);
  } else {
    ShouldNotReachHere();
  }
  __ decrement(rsi);
}


//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

// ... (code elided) ...

  const bool save_flags = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv == noreg || recv == rcx, "");

  // setup registers & access constant pool cache
  if (recv == noreg) recv = rcx;
  if (flags == noreg) flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
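  // After this call: method (and possibly index) identify the resolved call
  // target, and flags holds the cache entry's flags word (tos state,
  // parameter size, and the has_appendix bit tested below).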

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC) {
      // The Shenandoah barrier code is too large for a short jump.
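      // (jccb encodes only an 8-bit displacement; the barrier code that
      // Shenandoah adds around the oop load in
      // load_resolved_reference_at_index below can push the branch target
      // out of short-jump range, so the 32-bit near form is used here.)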
      __ jcc(Assembler::zero, L_no_push);
    } else
#endif
    __ jccb(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);
    __ mov(rbx, index);
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(index, rbx);
    __ pop(rbx);
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (note: no return address pushed yet)
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);