src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp

1208   const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
1209   const int start_page = native_call ? n_shadow_pages : 1;
1210   BLOCK_COMMENT("bang_stack_shadow_pages:");
1211   for (int pages = start_page; pages <= n_shadow_pages; pages++) {
1212     __ bang_stack_with_offset(pages*page_size);
1213   }
1214 }
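For intuition, a minimal standalone C++ sketch (not HotSpot code; the 4 KiB page size and 8-page shadow zone are illustrative assumptions, and bang_stack_with_offset is taken to write at the given distance below the current SP) of which offsets the loop above bangs. A native call starts at n_shadow_pages and therefore touches only the farthest page; a regular Java call walks every page from the first to the last.

#include <cstdio>

int main() {
  const int page_size        = 4096;           // assumed page size
  const int shadow_zone_size = 8 * page_size;  // assumed 8-page shadow zone

  for (int native_call = 0; native_call <= 1; native_call++) {
    const int n_shadow_pages = shadow_zone_size / page_size;
    const int start_page     = native_call ? n_shadow_pages : 1;
    printf("native_call=%d bangs offsets:", native_call);
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      printf(" %d", pages * page_size);        // distance below SP that is written
    }
    printf("\n");
  }
  return 0;
}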
1215 
1216 // Interpreter stub for calling a native method. (asm interpreter)
1217 // This sets up a somewhat different-looking stack for calling the
1218 // native method than the typical interpreter frame setup.
1219 //
1220 // On entry:
1221 //   R19_method    - method
1222 //   R16_thread    - JavaThread*
1223 //   R15_esp       - intptr_t* sender tos
1224 //
1225 //   abstract stack (grows up)
1226 //     [  IJava (caller of JNI callee)  ]  <-- ASP
1227 //        ...
1228 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
1229 
1230   address entry = __ pc();
1231 
1232   const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1233 
1234   // -----------------------------------------------------------------------------
1235   // Allocate a new frame that represents the native callee (i2n frame).
1236   // This is not a full-blown interpreter frame, but in particular, the
1237   // following registers are valid after this:
1238   // - R19_method
1239   // - R18_locals (points to start of arguments to native function)
1240   //
1241   //   abstract stack (grows up)
1242   //     [  IJava (caller of JNI callee)  ]  <-- ASP
1243   //        ...
1244 
1245   const Register signature_handler_fd = R11_scratch1;
1246   const Register pending_exception    = R0;
1247   const Register result_handler_addr  = R31;
1248   const Register native_method_fd     = R12_scratch2; // preferred in MacroAssembler::branch_to
1249   const Register access_flags         = R22_tmp2;
1250   const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
1251   const Register sync_state           = R12_scratch2;
1252   const Register sync_state_addr      = sync_state;   // Address is dead after use.

1667   __ mr(R4_ARG2/*issuing_pc*/, return_pc);
1668 
1669   // Return to exception handler.
1670   __ blr();
1671 
1672   //=============================================================================
1673   // Counter overflow.
1674 
1675   if (inc_counter) {
1676     // Handle invocation counter overflow.
1677     __ bind(invocation_counter_overflow);
1678 
1679     generate_counter_overflow(continue_after_compile);
1680   }
1681 
1682   return entry;
1683 }
1684 
1685 // Generic interpreted method entry to (asm) interpreter.
1686 //
1687 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
1688   bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1689   address entry = __ pc();
1690   // Generate the code to allocate the interpreter stack frame.
1691   Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
1692            Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
1693 
1694   // Also performs a stack check to ensure this frame fits on the stack.
1695   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
1696 
1697   // --------------------------------------------------------------------------
1698   // Zero out non-parameter locals.
1699   // Note: *Always* zero out non-parameter locals, as SPARC does. It's not
1700   // worth checking the flag; just do it.
1701   Register Rslot_addr = R6_ARG4,
1702            Rnum       = R7_ARG5;
1703   Label Lno_locals, Lzero_loop;
1704 
1705   // Set up the zeroing loop.
1706   __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
1707   __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
1708   __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
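Since subf RT, RA, RB computes RB - RA on PPC, the three instructions above yield the count of non-parameter slots and the address of the first slot past the parameters. A standalone C++ sketch of the same arithmetic plus the downward store loop it presumably feeds (not HotSpot code; the helper is hypothetical and the 8-byte slot size is an assumption):

#include <cstdint>

// locals_base plays the role of R18_locals; both sizes are in bytes.
// Locals live at decreasing addresses below the first parameter.
void zero_non_parameter_locals(intptr_t* locals_base,
                               uintptr_t size_of_parameters,
                               uintptr_t size_of_locals) {
  const unsigned log_stack_element_size = 3;  // assumed: 8-byte stack slots
  // Rnum: byte difference shifted down to a slot count.
  uintptr_t num = (size_of_locals - size_of_parameters) >> log_stack_element_size;
  // Rslot_addr: first slot past the parameters.
  intptr_t* slot = (intptr_t*)((char*)locals_base - size_of_parameters);
  while (num-- > 0) {
    *slot = 0;  // zero one local
    slot -= 1;  // the next local sits at the next-lower address
  }
}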

2330     __ cmpd(CR0, R12_scratch2, R11_scratch1);
2331     __ blt(CR0, Lskip_vm_call);
2332   }
2333 
2334   __ push(state);
2335   // Load the two topmost expression stack values.
2336   __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
2337   __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
2338   __ mflr(R31);
2339   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
2340   __ mtlr(R31);
2341   __ pop(state);
2342 
2343   if (TraceBytecodesAt > 0) {
2344     __ bind(Lskip_vm_call);
2345   }
2346   __ blr();
2347   BLOCK_COMMENT("} trace_code");
2348   return entry;
2349 }
2350 #endif //PRODUCT
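The cmpd/blt pair at the top of this block is the TraceBytecodesAt gate; judging from the surrounding guard, the elided loads fetch the running bytecode counter and the flag value. A standalone C++ sketch of the resulting control flow (not HotSpot code; names are stand-ins):

#include <cstdint>

static int64_t bytecode_counter   = 0;     // stand-in for BytecodeCounter::_counter_value
static int64_t trace_bytecodes_at = 1000;  // stand-in for the TraceBytecodesAt flag

void maybe_trace_bytecode() {
  // Note: the generator tests the flag at code-generation time, so the
  // compare/branch below is only emitted when TraceBytecodesAt > 0.
  if (trace_bytecodes_at > 0 && bytecode_counter < trace_bytecodes_at) {
    return;  // Lskip_vm_call: still below the threshold
  }
  // ... save state, call InterpreterRuntime::trace_bytecode with the two
  // topmost expression stack values, then restore state (as above) ...
}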
2351 
2352 void TemplateInterpreterGenerator::count_bytecode() {
2353   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
2354   __ ld(R12_scratch2, offs, R11_scratch1);
2355   __ addi(R12_scratch2, R12_scratch2, 1);
2356   __ std(R12_scratch2, offs, R11_scratch1);
2357 }
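count_bytecode is a plain load-increment-store of a global 64-bit counter; histogram_bytecode below does the same on a 32-bit per-opcode bucket (hence lwz/stw instead of ld/std). Roughly, as a C++ sketch with a stand-in for the real counter:

#include <cstdint>

namespace sketch {            // stand-in for BytecodeCounter
  int64_t counter_value = 0;
}

void count_bytecode_sketch() {
  int64_t v = sketch::counter_value;  // ld
  v += 1;                             // addi
  sketch::counter_value = v;          // std
  // Like the generated sequence, this read-modify-write is not atomic;
  // concurrent interpreter threads can lose increments.
}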
2358 
2359 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2360   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
2361   __ lwz(R12_scratch2, offs, R11_scratch1);
2362   __ addi(R12_scratch2, R12_scratch2, 1);
2363   __ stw(R12_scratch2, offs, R11_scratch1);
2364 }
2365 
2366 #ifndef PRODUCT
2367 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2368   const Register addr = R11_scratch1,
2369                  tmp  = R12_scratch2;
2370   // Get index, shift out old bytecode, bring in new bytecode, and store it.
2371   // _index = (_index >> log2_number_of_codes) |
2372   //          (bytecode << log2_number_of_codes);
2373   int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
2374   __ lwz(tmp, offs1, addr);
2375   __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
2376   __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
2377   __ stw(tmp, offs1, addr);
2378 
2379   // Bump bucket contents.
2380   // _counters[_index] ++;
2381   int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
2382   __ sldi(tmp, tmp, LogBytesPerInt);
2383   __ add(addr, tmp, addr);
2384   __ lwz(tmp, offs2, addr);
2385   __ addi(tmp, tmp, 1);
2386   __ stw(tmp, offs2, addr);
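The commented formula keeps a rolling two-opcode index: after the update, the low bits hold the previous bytecode and the high bits the current one, so each (previous, current) pair selects its own bucket. A standalone C++ sketch (not HotSpot code; 8 for log2_number_of_codes is an illustrative assumption):

#include <cstdint>

enum { kLog2Codes = 8 };  // assumed value of BytecodePairHistogram::log2_number_of_codes

static uint32_t pair_index = 0;
static uint32_t pair_counters[1u << (2 * kLog2Codes)];  // one bucket per (prev, cur) pair

void record_bytecode_pair(uint32_t bytecode) {
  // _index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
  pair_index = (pair_index >> kLog2Codes) | (bytecode << kLog2Codes);
  pair_counters[pair_index] += 1;  // _counters[_index]++
}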