
src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp


1211   const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
1212   const int start_page = native_call ? n_shadow_pages : 1;
1213   BLOCK_COMMENT("bang_stack_shadow_pages:");
1214   for (int pages = start_page; pages <= n_shadow_pages; pages++) {
1215     __ bang_stack_with_offset(pages*page_size);
1216   }
1217 }
1218 
1219 // Interpreter stub for calling a native method. (asm interpreter)
1220 // This sets up a somewhat different-looking stack for calling the
1221 // native method than the typical interpreter frame setup.
1222 //
1223 // On entry:
1224 //   R19_method    - method
1225 //   R16_thread    - JavaThread*
1226 //   R15_esp       - intptr_t* sender tos
1227 //
1228 //   abstract stack (grows up)
1229 //     [  IJava (caller of JNI callee)  ]  <-- ASP
1230 //        ...
1231 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
1232 
1233   address entry = __ pc();
1234 
1235   const bool inc_counter = UseCompiler || CountCompiledCalls;
1236 
1237   // -----------------------------------------------------------------------------
1238   // Allocate a new frame that represents the native callee (i2n frame).
1239   // This is not a full-blown interpreter frame, but in particular, the
1240   // following registers are valid after this:
1241   // - R19_method
1242   // - R18_locals (points to start of arguments to native function)
1243   //
1244   //   abstract stack (grows up)
1245   //     [  IJava (caller of JNI callee)  ]  <-- ASP
1246   //        ...
1247 
1248   const Register signature_handler_fd = R11_scratch1;
1249   const Register pending_exception    = R0;
1250   const Register result_handler_addr  = R31;
1251   const Register native_method_fd     = R12_scratch2; // preferred in MacroAssembler::branch_to
1252   const Register access_flags         = R22_tmp2;
1253   const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
1254   const Register sync_state           = R12_scratch2;
1255   const Register sync_state_addr      = sync_state;   // Address is dead after use.

1689   __ mr(R4_ARG2/*issuing_pc*/, return_pc);
1690 
1691   // Return to exception handler.
1692   __ blr();
1693 
1694   //=============================================================================
1695   // Counter overflow.
1696 
1697   if (inc_counter) {
1698     // Handle invocation counter overflow.
1699     __ bind(invocation_counter_overflow);
1700 
1701     generate_counter_overflow(continue_after_compile);
1702   }
1703 
1704   return entry;
1705 }
1706 
1707 // Generic interpreted method entry to (asm) interpreter.
1708 //
1709 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1710   bool inc_counter = UseCompiler || CountCompiledCalls;
1711   address entry = __ pc();
1712   // Generate the code to allocate the interpreter stack frame.
1713   Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
1714            Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
1715 
1716   // Also does a stack check to ensure this frame fits on the stack.
1717   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
1718 
1719   // --------------------------------------------------------------------------
1720   // Zero out non-parameter locals.
1721   // Note: *Always* zero out non-parameter locals as Sparc does. It's not
1722   // worth asking the flag, just do it.
1723   Register Rslot_addr = R6_ARG4,
1724            Rnum       = R7_ARG5;
1725   Label Lno_locals, Lzero_loop;
1726 
1727   // Set up the zeroing loop.
1728   __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
1729   __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
1730   __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);

2352     __ cmpd(CR0, R12_scratch2, R11_scratch1);
2353     __ blt(CR0, Lskip_vm_call);
2354   }
2355 
2356   __ push(state);
2357   // Load 2 topmost expression stack values.
2358   __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
2359   __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
2360   __ mflr(R31);
2361   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
2362   __ mtlr(R31);
2363   __ pop(state);
2364 
2365   if (TraceBytecodesAt > 0) {
2366     __ bind(Lskip_vm_call);
2367   }
2368   __ blr();
2369   BLOCK_COMMENT("} trace_code");
2370   return entry;
2371 }

2372 
2373 void TemplateInterpreterGenerator::count_bytecode() {
2374   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
2375   __ ld(R12_scratch2, offs, R11_scratch1);
2376   __ addi(R12_scratch2, R12_scratch2, 1);
2377   __ std(R12_scratch2, offs, R11_scratch1);
2378 }
2379 
2380 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2381   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
2382   __ lwz(R12_scratch2, offs, R11_scratch1);
2383   __ addi(R12_scratch2, R12_scratch2, 1);
2384   __ stw(R12_scratch2, offs, R11_scratch1);
2385 }
2386 

2387 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2388   const Register addr = R11_scratch1,
2389                  tmp  = R12_scratch2;
2390   // Get index, shift out old bytecode, bring in new bytecode, and store it.
2391   // _index = (_index >> log2_number_of_codes) |
2392   //          (bytecode << log2_number_of_codes);
2393   int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
2394   __ lwz(tmp, offs1, addr);
2395   __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
2396   __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
2397   __ stw(tmp, offs1, addr);
2398 
2399   // Bump bucket contents.
2400   // _counters[_index] ++;
2401   int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
2402   __ sldi(tmp, tmp, LogBytesPerInt);
2403   __ add(addr, tmp, addr);
2404   __ lwz(tmp, offs2, addr);
2405   __ addi(tmp, tmp, 1);
2406   __ stw(tmp, offs2, addr);

1211   const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
1212   const int start_page = native_call ? n_shadow_pages : 1;
1213   BLOCK_COMMENT("bang_stack_shadow_pages:");
1214   for (int pages = start_page; pages <= n_shadow_pages; pages++) {
1215     __ bang_stack_with_offset(pages*page_size);
1216   }
1217 }
1218 
1219 // Interpreter stub for calling a native method. (asm interpreter)
1220 // This sets up a somewhat different-looking stack for calling the
1221 // native method than the typical interpreter frame setup.
1222 //
1223 // On entry:
1224 //   R19_method    - method
1225 //   R16_thread    - JavaThread*
1226 //   R15_esp       - intptr_t* sender tos
1227 //
1228 //   abstract stack (grows up)
1229 //     [  IJava (caller of JNI callee)  ]  <-- ASP
1230 //        ...
1231 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
1232 
1233   address entry = __ pc();
1234 
1235   const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1236 
1237   // -----------------------------------------------------------------------------
1238   // Allocate a new frame that represents the native callee (i2n frame).
1239   // This is not a full-blown interpreter frame, but in particular, the
1240   // following registers are valid after this:
1241   // - R19_method
1242   // - R18_locals (points to start of arguments to native function)
1243   //
1244   //   abstract stack (grows up)
1245   //     [  IJava (caller of JNI callee)  ]  <-- ASP
1246   //        ...
1247 
1248   const Register signature_handler_fd = R11_scratch1;
1249   const Register pending_exception    = R0;
1250   const Register result_handler_addr  = R31;
1251   const Register native_method_fd     = R12_scratch2; // preferred in MacroAssembler::branch_to
1252   const Register access_flags         = R22_tmp2;
1253   const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
1254   const Register sync_state           = R12_scratch2;
1255   const Register sync_state_addr      = sync_state;   // Address is dead after use.
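
For reference, the page selection in bang_stack_shadow_pages above (lines 1211-1216) bangs every page of the shadow zone for Java calls, but starts at n_shadow_pages for native calls, so only the outermost shadow page is touched. A minimal standalone sketch of the loop bounds, assuming made-up page and shadow-zone sizes rather than the values HotSpot computes at runtime:

#include <cstdio>

// Standalone model of the loop bounds in bang_stack_shadow_pages above;
// page_size and n_shadow_pages are illustrative stand-ins only.
int main() {
  const int page_size      = 4096;   // assumed page size
  const int n_shadow_pages = 8;      // assumed stack_shadow_zone_size() / page_size

  for (int native_call = 0; native_call <= 1; native_call++) {
    const int start_page = native_call ? n_shadow_pages : 1;
    printf("native_call=%d, banged offsets:", native_call);
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      printf(" %d", pages * page_size);   // corresponds to bang_stack_with_offset(pages*page_size)
    }
    printf("\n");
  }
  return 0;
}

With these stand-in sizes the Java-call case touches eight offsets and the native-call case touches one, which is the distinction the start_page computation above encodes.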

1689   __ mr(R4_ARG2/*issuing_pc*/, return_pc);
1690 
1691   // Return to exception handler.
1692   __ blr();
1693 
1694   //=============================================================================
1695   // Counter overflow.
1696 
1697   if (inc_counter) {
1698     // Handle invocation counter overflow.
1699     __ bind(invocation_counter_overflow);
1700 
1701     generate_counter_overflow(continue_after_compile);
1702   }
1703 
1704   return entry;
1705 }
1706 
1707 // Generic interpreted method entry to (asm) interpreter.
1708 //
1709 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
1710   bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1711   address entry = __ pc();
1712   // Generate the code to allocate the interpreter stack frame.
1713   Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
1714            Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
1715 
1716   // Also does a stack check to ensure this frame fits on the stack.
1717   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
1718 
1719   // --------------------------------------------------------------------------
1720   // Zero out non-parameter locals.
1721   // Note: *Always* zero out non-parameter locals as Sparc does. It's not
1722   // worth asking the flag, just do it.
1723   Register Rslot_addr = R6_ARG4,
1724            Rnum       = R7_ARG5;
1725   Label Lno_locals, Lzero_loop;
1726 
1727   // Set up the zeroing loop.
1728   __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
1729   __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
1730   __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
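
The loop setup above (lines 1727-1730) derives the number of non-parameter local slots to zero from the two sizes written by generate_fixed_frame; the shift by Interpreter::logStackElementSize implies those sizes are in bytes. A standalone sketch of that arithmetic, with a hypothetical slot count and the 64-bit element size assumed for illustration:

#include <cstdio>

int main() {
  const int  log_stack_element_size = 3;                          // assumed: 8-byte stack elements
  const long size_of_parameters = 2L << log_stack_element_size;   // hypothetical: 2 parameter slots, in bytes
  const long size_of_locals     = 5L << log_stack_element_size;   // hypothetical: 5 local slots, in bytes

  // Mirrors: subf(Rnum, Rsize_of_parameters, Rsize_of_locals)
  //          srdi_(Rnum, Rnum, Interpreter::logStackElementSize)
  long slots_to_zero = (size_of_locals - size_of_parameters) >> log_stack_element_size;
  printf("non-parameter local slots to zero: %ld\n", slots_to_zero);   // prints 3
  return 0;
}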

2352     __ cmpd(CR0, R12_scratch2, R11_scratch1);
2353     __ blt(CR0, Lskip_vm_call);
2354   }
2355 
2356   __ push(state);
2357   // Load 2 topmost expression stack values.
2358   __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
2359   __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
2360   __ mflr(R31);
2361   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
2362   __ mtlr(R31);
2363   __ pop(state);
2364 
2365   if (TraceBytecodesAt > 0) {
2366     __ bind(Lskip_vm_call);
2367   }
2368   __ blr();
2369   BLOCK_COMMENT("} trace_code");
2370   return entry;
2371 }
2372 #endif //PRODUCT
2373 
2374 void TemplateInterpreterGenerator::count_bytecode() {
2375   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
2376   __ ld(R12_scratch2, offs, R11_scratch1);
2377   __ addi(R12_scratch2, R12_scratch2, 1);
2378   __ std(R12_scratch2, offs, R11_scratch1);
2379 }
2380 
2381 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2382   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
2383   __ lwz(R12_scratch2, offs, R11_scratch1);
2384   __ addi(R12_scratch2, R12_scratch2, 1);
2385   __ stw(R12_scratch2, offs, R11_scratch1);
2386 }
2387 
2388 #ifndef PRODUCT
2389 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2390   const Register addr = R11_scratch1,
2391                  tmp  = R12_scratch2;
2392   // Get index, shift out old bytecode, bring in new bytecode, and store it.
2393   // _index = (_index >> log2_number_of_codes) |
2394   //          (bytecode << log2_number_of_codes);
2395   int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
2396   __ lwz(tmp, offs1, addr);
2397   __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
2398   __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
2399   __ stw(tmp, offs1, addr);
2400 
2401   // Bump bucket contents.
2402   // _counters[_index] ++;
2403   int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
2404   __ sldi(tmp, tmp, LogBytesPerInt);
2405   __ add(addr, tmp, addr);
2406   __ lwz(tmp, offs2, addr);
2407   __ addi(tmp, tmp, 1);
2408   __ stw(tmp, offs2, addr);
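
The index update in histogram_bytecode_pair follows the formula in its comment: the previous bytecode is shifted out and the new one shifted in, so each consecutive pair of bytecodes selects its own counter bucket. A standalone model of that update; the 8-bit code width and the opcode values are assumptions for illustration, not taken from the patch:

#include <cstdio>

// Standalone model of the pair-histogram update performed by the assembly above.
static const int  log2_number_of_codes = 8;                        // assumed bits per bytecode
static const int  number_of_pairs      = 1 << (2 * log2_number_of_codes);
static int        pair_index           = 0;
static long       counters[number_of_pairs];                       // zero-initialized statics

static void record_bytecode(int bytecode) {
  // _index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
  pair_index = (pair_index >> log2_number_of_codes) |
               (bytecode   << log2_number_of_codes);
  counters[pair_index]++;                                          // _counters[_index]++;
}

int main() {
  record_bytecode(0x12);   // ldc
  record_bytecode(0xb6);   // invokevirtual: the bucket now encodes the (ldc, invokevirtual) pair
  printf("bucket 0x%x, count %ld\n", pair_index, counters[pair_index]);
  return 0;
}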