  const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
  const int start_page = native_call ? n_shadow_pages : 1;
  BLOCK_COMMENT("bang_stack_shadow_pages:");
  for (int pages = start_page; pages <= n_shadow_pages; pages++) {
    __ bang_stack_with_offset(pages*page_size);
  }
}
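// What the banging loop above emits, sketched at the C level: each
// bang_stack_with_offset(off) is a single store that touches the stack page
// at SP - off, so every shadow-zone page is mapped (or faults early) before
// code starts running on it. A minimal sketch; the helper name
// touch_shadow_pages and the raw pointer arithmetic are illustrative
// assumptions, not part of this file:
//
//   static void touch_shadow_pages(volatile char* sp, int start_page,
//                                  int n_pages, int page_size) {
//     for (int page = start_page; page <= n_pages; page++) {
//       sp[-(intptr_t)page * page_size] = 0; // one store per shadow page
//     }
//   }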

// Interpreter stub for calling a native method (asm interpreter).
// This sets up a somewhat different stack layout for calling the native
// method than the typical interpreter frame setup does.
//
// On entry:
//   R19_method - method
//   R16_thread - JavaThread*
//   R15_esp    - intptr_t* sender tos
//
// abstract stack (grows up)
//   [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  // abstract stack (grows up)
  //   [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R12_scratch2; // preferred in MacroAssembler::branch_to
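  // Why R12 is preferred here (assumption based on the PPC64 ELFv2 calling
  // convention, not something this file states): an indirect callee's global
  // entry point expects its own address in R12 so it can derive its TOC
  // pointer, so keeping the native entry in R12_scratch2 lets
  // MacroAssembler::branch_to call through it without an extra move.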
  // ...

  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}
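// On the counter-overflow tail above: when the method's invocation counter
// trips its limit, execution branches to invocation_counter_overflow, and
// generate_counter_overflow() emits a call into the VM
// (InterpreterRuntime::frequency_counter_overflow) that may trigger JIT
// compilation, then resumes at continue_after_compile. A rough C-level
// sketch of the effect (the branch_bcp argument is null for method-entry
// overflows, as opposed to backedge overflows):
//
//   if (counter_overflowed) {
//     InterpreterRuntime::frequency_counter_overflow(thread, nullptr);
//     // fall through: keep interpreting this invocation either way
//   }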

// Generic interpreted method entry to (asm) interpreter.
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  bool inc_counter = UseCompiler || CountCompiledCalls;
  address entry = __ pc();
  // Generate the code to allocate the interpreter stack frame.
  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.

  // Also performs a stack check to ensure this frame fits on the stack.
  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

  // --------------------------------------------------------------------------
  // Zero out non-parameter locals.
  // Note: *always* zero out non-parameter locals, as SPARC does. It's not
  // worth checking the flag; just do it.
  Register Rslot_addr = R6_ARG4,
           Rnum       = R7_ARG5;
  Label Lno_locals, Lzero_loop;

  // Set up the zeroing loop.
  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
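  // What the two subf instructions compute, sketched in C. On PPC,
  // subf(d, a, b) yields d = b - a, and R18_locals points at the first
  // local slot with further locals at *lower* addresses, so (assuming the
  // sizes here are already scaled to bytes by generate_fixed_frame):
  //
  //   bytes_to_zero      = size_of_locals - size_of_parameters;
  //   first_slot_to_zero = locals_base    - size_of_parameters;
  //   // the elided loop body then walks downward, storing zeros:
  //   //   while (bytes_to_zero remain) { *slot = 0; slot--; }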

  // ...

                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  __ align(32, 12, 24); // align L
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
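// Why there is one entry point per tos state: callers may arrive with the
// top-of-stack value cached in a register (pointer, float, double, long, or
// int). Each typed entry above first pushes that cached value onto the
// expression stack (push_ptr, push_f, ...), after which all paths share the
// vtos ("nothing cached") code at label L, which dispatches the template.
// The byte/char/short/int states (bep/cep/sep/iep) share one entry because
// their values are all cached in the same integer register format.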

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}
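// The load/increment/store sequence above is the emitted equivalent of
//   BytecodeCounter::_counter_value++;
// It is not atomic; since the counter is a statistic, occasional lost
// updates under concurrency are accepted.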

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}
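// Same pattern as count_bytecode(), but per bytecode; in effect
//   BytecodeHistogram::_counters[t->bytecode()]++;
// Note that the counter's address is resolved at code-generation time,
// since t->bytecode() is a constant of the template being generated.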

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  //__ flush_bundle();
  address entry = __ pc();

  const char *bname = nullptr;
  uint tsize = 0;
  switch(state) {
  case ftos:
    bname = "trace_code_ftos {";
    tsize = 2;
    break;
  case btos:
    bname = "trace_code_btos {";
    tsize = 2;
    break;
  case ztos:
    bname = "trace_code_ztos {";
    tsize = 2;
  // ...

    __ blt(CCR0, Lskip_vm_call);
  }

  __ push(state);
  // Load 2 topmost expression stack values.
  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  __ mflr(R31);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  __ mtlr(R31);
  __ pop(state);

  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    __ bind(Lskip_vm_call);
  }
  __ blr();
  BLOCK_COMMENT("} trace_code");
  return entry;
}
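// On the VM call above: the link register is saved in R31 around call_VM
// because this trace stub is reached by a plain branch-and-link from the
// dispatch code and must blr back to it afterwards. Two stack slots are
// loaded so that InterpreterRuntime::trace_bytecode can print the topmost
// operand stack values for both one-slot and two-slot (long/double) tos
// states (this rationale is inferred, not stated in this file).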

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register addr = R11_scratch1,
                 tmp  = R12_scratch2;
  // Get index, shift out old bytecode, bring in new bytecode, and store it.
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  __ lwz(tmp, offs1, addr);
  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ stw(tmp, offs1, addr);

  // Bump bucket contents.
  // _counters[_index] ++;
  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  __ sldi(tmp, tmp, LogBytesPerInt);
  __ add(addr, tmp, addr);
  __ lwz(tmp, offs2, addr);
  __ addi(tmp, tmp, 1);
  __ stw(tmp, offs2, addr);
}
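// Worked example of the index update (assuming log2_number_of_codes == 8,
// i.e. one byte per bytecode in the packed pair index): if the previous
// index is 0x1234 (previous bytecode 0x12 in the high byte) and the new
// bytecode is 0x56, then
//   index = (0x1234 >> 8) | (0x56 << 8) = 0x0012 | 0x5600 = 0x5612
// so the index always encodes the (previous, current) bytecode pair, and
// _counters[0x5612] is the bucket bumped by the second half of the code.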