src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp


Old version:

 972 
 973   __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 974   __ bgtu(sp, t0, L_done);
 975 
 976   for (int p = 1; p <= n_shadow_pages; p++) {
 977     __ bang_stack_with_offset(p * page_size);
 978   }
 979 
 980   // Record the new watermark, but only if the update is above the safe limit.
 981   // Otherwise, the next time around the check above would pass the safe limit.
 982   __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
 983   __ bleu(sp, t0, L_done);
 984   __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 985 
 986   __ bind(L_done);
 987 }
 988 
 989 // Interpreter stub for calling a native method. (asm interpreter)
 990 // This sets up a somewhat different looking stack for calling the
 991 // native method than the typical interpreter frame setup.
 992 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 993   // determine code generation flags
 994   bool inc_counter = UseCompiler || CountCompiledCalls;
 995 
 996   // x11: Method*
 997   // x30: sender sp
 998 
 999   address entry_point = __ pc();
1000 
1001   const Address constMethod       (xmethod, Method::const_offset());
1002   const Address access_flags      (xmethod, Method::access_flags_offset());
1003   const Address size_of_parameters(x12, ConstMethod::
1004                                    size_of_parameters_offset());
1005 
1006   // get parameter size (always needed)
1007   __ ld(x12, constMethod);
1008   __ load_unsigned_short(x12, size_of_parameters);
1009 
1010   // Native calls don't need the stack size check since they have no
1011   // expression stack and the arguments are already on the stack and
1012   // we only add a handful of words to the stack.
1013 
1014   // xmethod: Method*

1410 
1411   JFR_ONLY(__ leave_jfr_critical_section();)
1412 
1413   // restore sender sp
1414   __ mv(sp, esp);
1415 
1416   __ ret();
1417 
1418   if (inc_counter) {
1419     // Handle overflow of counter and compile method
1420     __ bind(invocation_counter_overflow);
1421     generate_counter_overflow(continue_after_compile);
1422   }
1423 
1424   return entry_point;
1425 }
1426 
1427 //
1428 // Generic interpreted method entry to (asm) interpreter
1429 //
1430 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1431 
1432   // determine code generation flags
1433   const bool inc_counter  = UseCompiler || CountCompiledCalls;
1434 
1435   // t0: sender sp
1436   address entry_point = __ pc();
1437 
1438   const Address constMethod(xmethod, Method::const_offset());
1439   const Address access_flags(xmethod, Method::access_flags_offset());
1440   const Address size_of_parameters(x13,
1441                                    ConstMethod::size_of_parameters_offset());
1442   const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
1443 
1444   // get parameter size (always needed)
1445   // need to load the const method first
1446   __ ld(x13, constMethod);
1447   __ load_unsigned_short(x12, size_of_parameters);
1448 
1449   // x12: size of parameters
1450 
1451   __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
1452   __ sub(x13, x13, x12); // x13 = no. of additional locals
1453 

1844 
1845 //-----------------------------------------------------------------------------
1846 
1847 // Non-product code
1848 #ifndef PRODUCT
1849 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1850   address entry = __ pc();
1851 
1852   __ push_reg(ra);
1853   __ push(state);
1854   __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1855   __ mv(c_rarg2, x10);  // Pass itos
1856   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
1857   __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1858   __ pop(state);
1859   __ pop_reg(ra);
1860   __ ret();                                   // return from result handler
1861 
1862   return entry;
1863 }

1864 
1865 void TemplateInterpreterGenerator::count_bytecode() {
1866   __ mv(x7, (address) &BytecodeCounter::_counter_value);
1867   __ atomic_add(noreg, 1, x7);
1868 }
1869 
1870 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1871   __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
1872   __ atomic_addw(noreg, 1, x7);
1873 }
1874 

1875 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1876   // Calculate new index for counter:
1877   //   _index = (_index >> log2_number_of_codes) |
1878   //            (bytecode << log2_number_of_codes);
1879   Register index_addr = t1;
1880   Register index = t0;
1881   __ mv(index_addr, (address) &BytecodePairHistogram::_index);
1882   __ lw(index, index_addr);
1883   __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1884   __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
1885   __ orrw(index, x7, index);
1886   __ sw(index, index_addr);
1887   // Bump bucket contents:
1888   //   _counters[_index] ++;
1889   Register counter_addr = t1;
1890   __ mv(x7, (address) &BytecodePairHistogram::_counters);
1891   __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
1892   __ atomic_addw(noreg, 1, counter_addr);
1893  }
1894 

New version:

 972 
 973   __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 974   __ bgtu(sp, t0, L_done);
 975 
 976   for (int p = 1; p <= n_shadow_pages; p++) {
 977     __ bang_stack_with_offset(p * page_size);
 978   }
 979 
 980   // Record the new watermark, but only if the update is above the safe limit.
 981   // Otherwise, the next time around the check above would pass the safe limit.
 982   __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
 983   __ bleu(sp, t0, L_done);
 984   __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 985 
 986   __ bind(L_done);
 987 }
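
For reference on the shadow-zone banging above, a hedged C++ sketch of the run-time behaviour of the emitted code. ThreadStub, bang() and the parameter names are simplified stand-ins rather than HotSpot APIs, and it assumes bang_stack_with_offset touches memory below sp, as on the other ports:

#include <cstddef>
#include <cstdint>

struct ThreadStub {                       // stand-in for JavaThread's shadow-zone fields
  uintptr_t shadow_zone_growth_watermark;
  uintptr_t shadow_zone_safe_limit;
};

static void bang(uintptr_t addr) {        // stand-in for bang_stack_with_offset
  *reinterpret_cast<volatile char*>(addr) = 0;
}

void bang_shadow_pages(ThreadStub* thread, uintptr_t sp,
                       int n_shadow_pages, size_t page_size) {
  // The stack grows down, so an sp above the watermark was already banged.
  if (sp > thread->shadow_zone_growth_watermark) return;    // bgtu(sp, t0, L_done)
  for (int p = 1; p <= n_shadow_pages; p++) {
    bang(sp - p * page_size);                                // bang_stack_with_offset(p * page_size)
  }
  // Record the new watermark only while it stays above the safe limit, so the
  // cheap check above can never skip past the safe limit.
  if (sp <= thread->shadow_zone_safe_limit) return;          // bleu(sp, t0, L_done)
  thread->shadow_zone_growth_watermark = sp;                 // sd(sp, ...growth_watermark())
}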
 988 
 989 // Interpreter stub for calling a native method. (asm interpreter)
 990 // This sets up a somewhat different looking stack for calling the
 991 // native method than the typical interpreter frame setup.
 992 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
 993   // determine code generation flags
 994   bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
 995 
 996   // x11: Method*
 997   // x30: sender sp
 998 
 999   address entry_point = __ pc();
1000 
1001   const Address constMethod       (xmethod, Method::const_offset());
1002   const Address access_flags      (xmethod, Method::access_flags_offset());
1003   const Address size_of_parameters(x12, ConstMethod::
1004                                    size_of_parameters_offset());
1005 
1006   // get parameter size (always needed)
1007   __ ld(x12, constMethod);
1008   __ load_unsigned_short(x12, size_of_parameters);
1009 
1010   // Native calls don't need the stack size check since they have no
1011   // expression stack and the arguments are already on the stack and
1012   // we only add a handful of words to the stack.
1013 
1014   // xmethod: Method*

1410 
1411   JFR_ONLY(__ leave_jfr_critical_section();)
1412 
1413   // restore sender sp
1414   __ mv(sp, esp);
1415 
1416   __ ret();
1417 
1418   if (inc_counter) {
1419     // Handle overflow of counter and compile method
1420     __ bind(invocation_counter_overflow);
1421     generate_counter_overflow(continue_after_compile);
1422   }
1423 
1424   return entry_point;
1425 }
1426 
1427 //
1428 // Generic interpreted method entry to (asm) interpreter
1429 //
1430 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
1431 
1432   // determine code generation flags
1433   const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1434 
1435   // t0: sender sp
1436   address entry_point = __ pc();
1437 
1438   const Address constMethod(xmethod, Method::const_offset());
1439   const Address access_flags(xmethod, Method::access_flags_offset());
1440   const Address size_of_parameters(x13,
1441                                    ConstMethod::size_of_parameters_offset());
1442   const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
1443 
1444   // get parameter size (always needed)
1445   // need to load the const method first
1446   __ ld(x13, constMethod);
1447   __ load_unsigned_short(x12, size_of_parameters);
1448 
1449   // x12: size of parameters
1450 
1451   __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
1452   __ sub(x13, x13, x12); // x13 = no. of additional locals
1453 
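A hedged sketch of the size computation this hunk performs; ConstMethodSizes and additional_locals are illustrative stand-ins for the two u2 fields the stub reads off ConstMethod, not real HotSpot types:

#include <cstdint>

// Stand-in for the two u2 fields read off ConstMethod.
struct ConstMethodSizes {
  uint16_t size_of_parameters;   // incoming argument words, already on the stack
  uint16_t size_of_locals;       // total local slots the method declares
};

// x13 ends up holding the count of extra (non-parameter) local slots that the
// interpreter frame still has to allocate.
int additional_locals(const ConstMethodSizes& cm) {
  int params = cm.size_of_parameters;   // load_unsigned_short(x12, size_of_parameters)
  int locals = cm.size_of_locals;       // load_unsigned_short(x13, size_of_locals)
  return locals - params;               // sub(x13, x13, x12)
}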

1844 
1845 //-----------------------------------------------------------------------------
1846 
1847 // Non-product code
1848 #ifndef PRODUCT
1849 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1850   address entry = __ pc();
1851 
1852   __ push_reg(ra);
1853   __ push(state);
1854   __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1855   __ mv(c_rarg2, x10);  // Pass itos
1856   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
1857   __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1858   __ pop(state);
1859   __ pop_reg(ra);
1860   __ ret();                                   // return from result handler
1861 
1862   return entry;
1863 }
1864 #endif // PRODUCT
1865 
1866 void TemplateInterpreterGenerator::count_bytecode() {
1867   __ mv(x7, (address) &BytecodeCounter::_counter_value);
1868   __ atomic_add(noreg, 1, x7);
1869 }
1870 
1871 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1872   __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
1873   __ atomic_addw(noreg, 1, x7);
1874 }
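
A hedged plain-C++ analogue of the two counter stubs above; the names, the array size and the counter widths are stand-ins, with the 64-bit/32-bit split only inferred from the atomic_add vs. atomic_addw choice:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> counter_value{0};             // stand-in for BytecodeCounter::_counter_value
std::atomic<uint32_t> bytecode_counters[256] = {};  // stand-in for BytecodeHistogram::_counters

void count_bytecode_stub() {
  counter_value.fetch_add(1);                       // atomic_add(noreg, 1, x7)
}

void histogram_bytecode_stub(int bytecode) {
  bytecode_counters[bytecode].fetch_add(1);         // atomic_addw(noreg, 1, x7)
}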
1875 
1876 #ifndef PRODUCT
1877 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1878   // Calculate new index for counter:
1879   //   _index = (_index >> log2_number_of_codes) |
1880   //            (bytecode << log2_number_of_codes);
1881   Register index_addr = t1;
1882   Register index = t0;
1883   __ mv(index_addr, (address) &BytecodePairHistogram::_index);
1884   __ lw(index, index_addr);
1885   __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1886   __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
1887   __ orrw(index, x7, index);
1888   __ sw(index, index_addr);
1889   // Bump bucket contents:
1890   //   _counters[_index] ++;
1891   Register counter_addr = t1;
1892   __ mv(x7, (address) &BytecodePairHistogram::_counters);
1893   __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
1894   __ atomic_addw(noreg, 1, counter_addr);
1895  }
1896 
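A hedged sketch of the pair-histogram update the stub emits, following the comment above; the constants and names are illustrative stand-ins for BytecodePairHistogram's real values:

#include <atomic>
#include <cstdint>

constexpr int log2_number_of_codes = 8;                  // illustrative
constexpr int number_of_codes      = 1 << log2_number_of_codes;

uint32_t              pair_index = 0;                                        // _index
std::atomic<uint32_t> pair_counters[number_of_codes * number_of_codes] = {}; // _counters

void histogram_bytecode_pair_stub(int bytecode) {
  // Keep a sliding window of the last two bytecodes: the previous one ends up
  // in the low bits, the current one in the high bits.
  pair_index = (pair_index >> log2_number_of_codes) |
               (uint32_t(bytecode) << log2_number_of_codes);
  // shadd scales the index by LogBytesPerInt to address the 32-bit bucket,
  // then atomic_addw bumps it.
  pair_counters[pair_index].fetch_add(1);
}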