
src/hotspot/share/runtime/sharedRuntime.cpp



   1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1359   assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
1360 
1361 #ifndef PRODUCT
1362   // tracing/debugging/statistics
1363   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1364                 (is_virtual) ? (&_resolve_virtual_ctr) :
1365                                (&_resolve_static_ctr);
1366   Atomic::inc(addr);
1367 
1368   if (TraceCallFixup) {
1369     ResourceMark rm(thread);
1370     tty->print("resolving %s%s (%s) call to",
1371       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1372       Bytecodes::name(invoke_code));
1373     callee_method->print_short_name(tty);
1374     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1375                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1376   }
1377 #endif
1378 
1379   // Do not patch call site for static call when the class is not
1380   // fully initialized.
1381   if (invoke_code == Bytecodes::_invokestatic &&
1382       !callee_method->method_holder()->is_initialized()) {
1383     assert(callee_method->method_holder()->is_linked(), "must be");
1384     return callee_method;






1385   }
1386 
1387   // JSR 292 key invariant:
1388   // If the resolved method is a MethodHandle invoke target, the call
1389   // site must be a MethodHandle call site, because the lambda form might tail-call
1390   // leaving the stack in a state unknown to either caller or callee
1391   // TODO detune for now but we might need it again
1392 //  assert(!callee_method->is_compiled_lambda_form() ||
1393 //         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1394 
1395   // Compute entry points. This might require generation of C2I converter
1396   // frames, so we cannot be holding any locks here. Furthermore, the
1397   // computation of the entry points is independent of patching the call.  We
1398   // always return the entry-point, but we only patch the stub if the call has
1399   // not been deoptimized.  Return values: For a virtual call this is a
1400   // (cached_oop, destination address) pair. For a static call/optimized
1401   // virtual this is just a destination address.
1402 
1403   // Patching IC caches may fail if we run out of transition stubs.
1404   // We then refill the IC stubs and try again.
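
(For context: in mainline sources of this era, the retry described here is a small loop around the internal resolver; a simplified sketch, with the helper's arguments elided and names as they appear in those sources:)

    for (;;) {
      ICRefillVerifier ic_refill_verifier;       // scope object used by the IC stub refill machinery
      bool successful = resolve_sub_helper_internal(/* callee, frames, call info ... */);
      if (successful) {
        return callee_method;                    // call site resolved (and patched if still appropriate)
      } else {
        InlineCacheBuffer::refill_ic_stubs();    // out of transition stubs: replenish and retry
      }
    }
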


1895       }
1896       // assert is too strong; the destination could also be a resolve destination.
1897       // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1898     }
1899   } else {
1900     if (TraceCallFixup) {
1901       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1902       moop->print_short_name(tty);
1903       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1904     }
1905   }
1906   return false;
1907 }
1908 
1909 // ---------------------------------------------------------------------------
1910 // We are calling the interpreter via a c2i. Normally this would mean that
1911 // we were called by a compiled method. However, we could have lost a race
1912 // where we went int -> i2c -> c2i, so the caller could in fact be
1913 // interpreted. If the caller is compiled, we attempt to patch the caller
1914 // so it no longer calls into the interpreter.
1915 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1916   Method* moop(method);
1917 
1918   address entry_point = moop->from_compiled_entry_no_trampoline();
1919 
1920   // It's possible that deoptimization can occur at a call site which hasn't
1921   // been resolved yet, in which case this function will be called from
1922   // an nmethod that has been patched for deopt, and we can ignore the
1923   // request for a fixup.
1924   // It is also possible that we lost a race and from_compiled_entry now
1925   // points back to the i2c; in that case we don't need to patch, and if
1926   // we did we'd leap into space, because the call site needs to use the
1927   // "to interpreter" stub in order to load up the Method*. Don't
1928   // ask me how I know this...
1929 
1930   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1931   if (cb == NULL || !cb->is_compiled() || entry_point == moop->get_c2i_entry()) {
1932     return;
1933   }
1934 
1935   // The check above makes sure this is an nmethod.


1964       // there. If you're lucky you'll get the assert in the bugid; if not, you've
1965       // just turned a call site that could be megamorphic into a monomorphic site
1966       // for the rest of its life! Just another racing bug in the life of
1967       // fixup_callers_callsite ...
1968       //
1969       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1970       iter.next();
1971       assert(iter.has_current(), "must have a reloc at java call site");
1972       relocInfo::relocType typ = iter.reloc()->type();
1973       if (typ != relocInfo::static_call_type &&
1974            typ != relocInfo::opt_virtual_call_type &&
1975            typ != relocInfo::static_stub_type) {
1976         return;
1977       }
1978       address destination = call->destination();
1979       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
1980         call->set_destination_mt_safe(entry_point);
1981       }
1982     }
1983   }
1984 JRT_END
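
(A standalone toy model, with hypothetical names and no VM types, of the two early-out checks this function makes before patching:)

    #include <cstdio>

    struct Method   { const void* from_compiled_entry; const void* c2i_entry; };
    struct CodeBlob { bool is_compiled; };

    // Patch only when the caller really is compiled and the callee's
    // from_compiled entry is no longer the c2i adapter (i.e. we did not
    // lose the int -> i2c -> c2i race described above).
    static bool needs_fixup(const CodeBlob* caller, const Method& callee) {
      if (caller == nullptr || !caller->is_compiled) return false;
      if (callee.from_compiled_entry == callee.c2i_entry) return false;
      return true;
    }

    int main() {
      char c2i = 0, compiled = 0;               // stand-ins for code addresses
      Method racing{&c2i, &c2i};                // lost the race: entry is the c2i again
      Method has_code{&compiled, &c2i};         // callee still has compiled code
      CodeBlob nm{true};
      std::printf("%d %d\n", needs_fixup(&nm, racing), needs_fixup(&nm, has_code)); // prints: 0 1
    }
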
1985 
1986 
1987 // same as JVM_Arraycopy, but called directly from compiled code
1988 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1989                                                 oopDesc* dest, jint dest_pos,
1990                                                 jint length,
1991                                                 JavaThread* thread)) {
1992 #ifndef PRODUCT
1993   _slow_array_copy_ctr++;
1994 #endif
1995   // Check if we have null pointers
1996   if (src == NULL || dest == NULL) {
1997     THROW(vmSymbols::java_lang_NullPointerException());
1998   }
1999   // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
2000   // even though the copy_array API also performs dynamic checks to ensure
2001   // that src and dest are truly arrays (and are conformable).
2002   // The copy_array mechanism is awkward and could be removed, but
2003   // the compilers don't call this function except as a last resort,
2004   // so it probably doesn't matter.
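
(For context: in sources of this era the elided body then dispatches the copy through the callee's klass, roughly as follows:)

    // The casts discussed above: copy_array takes arrayOop arguments, and
    // performs its own dynamic checks that src and dest really are arrays.
    src->klass()->copy_array((arrayOop)src,  src_pos,
                             (arrayOop)dest, dest_pos, length, thread);
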


2233     double rest = sum;
2234     double percent = sum / 100;
2235     for (i = 0; i <= N; i++) {
2236       rest -= histo[i];
2237       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
2238     }
2239     tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
2240     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2241   }
2242 
2243   void print_histogram() {
2244     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2245     print_histogram_helper(_max_arity, _arity_histogram, "arity");
2246     tty->print_cr("\nSame for parameter size (in words):");
2247     print_histogram_helper(_max_size, _size_histogram, "size");
2248     tty->cr();
2249   }
2250 
2251  public:
2252   MethodArityHistogram() {
2253     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2254     _max_arity = _max_size = 0;
2255     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2256     CodeCache::nmethods_do(add_method_to_histogram);
2257     print_histogram();
2258   }
2259 };
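
(A tiny standalone check of the percentage arithmetic in print_histogram_helper above: dividing each bucket by sum / 100 yields its share in percent:)

    #include <cstdio>

    int main() {
      int histo[] = {10, 30, 60};
      double sum = 100;              // total number of samples
      double percent = sum / 100;    // "one percent" worth of samples
      for (int h : histo)
        std::printf("%5.1f%%\n", h / percent);   // prints 10.0%, 30.0%, 60.0%
      return 0;
    }
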
2260 
2261 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2262 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2263 int MethodArityHistogram::_max_arity;
2264 int MethodArityHistogram::_max_size;
2265 
2266 void SharedRuntime::print_call_statistics(int comp_total) {
2267   tty->print_cr("Calls from compiled code:");
2268   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2269   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
2270   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
2271   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
2272   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2273   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
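
(Made-up numbers illustrating the monomorphic-call arithmetic above:)

    #include <cstdio>

    int main() {
      // Of 1000 non-inlined virtual call sites, 200 were statically optimized
      // and 50 went megamorphic, leaving 1000 - 200 - 50 = 750 monomorphic.
      int nof_normal = 1000, nof_optimized = 200, nof_megamorphic = 50;
      std::printf("monomorphic: %d\n", nof_normal - nof_optimized - nof_megamorphic);
      return 0;
    }
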


   1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1359   assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
1360 
1361 #ifndef PRODUCT
1362   // tracing/debugging/statistics
1363   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1364                 (is_virtual) ? (&_resolve_virtual_ctr) :
1365                                (&_resolve_static_ctr);
1366   Atomic::inc(addr);
1367 
1368   if (TraceCallFixup) {
1369     ResourceMark rm(thread);
1370     tty->print("resolving %s%s (%s) call to",
1371       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1372       Bytecodes::name(invoke_code));
1373     callee_method->print_short_name(tty);
1374     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1375                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1376   }
1377 #endif
1378 
1379   // Do not patch call site for static call to another class
1380   // when the class is not fully initialized.
1381   if (invoke_code == Bytecodes::_invokestatic) {
1382     if (!callee_method->method_holder()->is_initialized() &&
1383         callee_method->method_holder() != caller_nm->method()->method_holder()) {
1384       assert(callee_method->method_holder()->is_linked(), "must be");
1385       return callee_method;
1386     } else {
1387       assert(callee_method->method_holder()->is_initialized() ||
1388              callee_method->method_holder()->is_reentrant_initialization(thread),
1389              "invalid class initialization state for invoke_static");
1390     }
1391   }
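
(A standalone toy model, with hypothetical names and no VM types, of the decision the guard and assert above encode: a static call site may be patched once the holder is fully initialized, or when the holder itself is mid-<clinit> on the current thread:)

    #include <cstdio>

    enum class InitState { linked, being_initialized, fully_initialized };

    struct Klass {
      InitState   state;
      const void* init_thread;   // thread currently running <clinit>, if any
    };

    static bool can_patch_static_call(const Klass* holder, const Klass* caller_holder,
                                      const void* current_thread) {
      if (holder->state == InitState::fully_initialized) return true;    // safe for everyone
      return holder == caller_holder &&                                  // <clinit> calling its own statics
             holder->state == InitState::being_initialized &&
             holder->init_thread == current_thread;
    }

    int main() {
      int thread_id = 0;                                   // stand-in for the current JavaThread
      Klass self  = { InitState::being_initialized, &thread_id };
      Klass other = { InitState::linked, nullptr };
      std::printf("%d %d\n",
                  can_patch_static_call(&self,  &self, &thread_id),    // 1: reentrant initialization
                  can_patch_static_call(&other, &self, &thread_id));   // 0: keep resolving via runtime
      return 0;
    }
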
1392 
1393   // JSR 292 key invariant:
1394   // If the resolved method is a MethodHandle invoke target, the call
1395   // site must be a MethodHandle call site, because the lambda form might tail-call
1396   // leaving the stack in a state unknown to either caller or callee
1397   // TODO detune for now but we might need it again
1398 //  assert(!callee_method->is_compiled_lambda_form() ||
1399 //         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1400 
1401   // Compute entry points. This might require generation of C2I converter
1402   // frames, so we cannot be holding any locks here. Furthermore, the
1403   // computation of the entry points is independent of patching the call.  We
1404   // always return the entry-point, but we only patch the stub if the call has
1405   // not been deoptimized.  Return values: For a virtual call this is a
1406   // (cached_oop, destination address) pair. For a static call/optimized
1407   // virtual this is just a destination address.
1408 
1409   // Patching IC caches may fail if we run out of transition stubs.
1410   // We then refill the IC stubs and try again.


1901       }
1902       // assert is too strong; the destination could also be a resolve destination.
1903       // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1904     }
1905   } else {
1906     if (TraceCallFixup) {
1907       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1908       moop->print_short_name(tty);
1909       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1910     }
1911   }
1912   return false;
1913 }
1914 
1915 // ---------------------------------------------------------------------------
1916 // We are calling the interpreter via a c2i. Normally this would mean that
1917 // we were called by a compiled method. However, we could have lost a race
1918 // where we went int -> i2c -> c2i, so the caller could in fact be
1919 // interpreted. If the caller is compiled, we attempt to patch the caller
1920 // so it no longer calls into the interpreter.
1921 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1922   Method* moop(method);
1923 
1924   address entry_point = moop->from_compiled_entry_no_trampoline();
1925 
1926   // It's possible that deoptimization can occur at a call site which hasn't
1927   // been resolved yet, in which case this function will be called from
1928   // an nmethod that has been patched for deopt, and we can ignore the
1929   // request for a fixup.
1930   // It is also possible that we lost a race and from_compiled_entry now
1931   // points back to the i2c; in that case we don't need to patch, and if
1932   // we did we'd leap into space, because the call site needs to use the
1933   // "to interpreter" stub in order to load up the Method*. Don't
1934   // ask me how I know this...
1935 
1936   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1937   if (cb == NULL || !cb->is_compiled() || entry_point == moop->get_c2i_entry()) {
1938     return;
1939   }
1940 
1941   // The check above makes sure this is an nmethod.


1970       // there. If you're lucky you'll get the assert in the bugid; if not, you've
1971       // just turned a call site that could be megamorphic into a monomorphic site
1972       // for the rest of its life! Just another racing bug in the life of
1973       // fixup_callers_callsite ...
1974       //
1975       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1976       iter.next();
1977       assert(iter.has_current(), "must have a reloc at java call site");
1978       relocInfo::relocType typ = iter.reloc()->type();
1979       if (typ != relocInfo::static_call_type &&
1980            typ != relocInfo::opt_virtual_call_type &&
1981            typ != relocInfo::static_stub_type) {
1982         return;
1983       }
1984       address destination = call->destination();
1985       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
1986         call->set_destination_mt_safe(entry_point);
1987       }
1988     }
1989   }
1990 IRT_END
1991 
1992 
1993 // same as JVM_Arraycopy, but called directly from compiled code
1994 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1995                                                 oopDesc* dest, jint dest_pos,
1996                                                 jint length,
1997                                                 JavaThread* thread)) {
1998 #ifndef PRODUCT
1999   _slow_array_copy_ctr++;
2000 #endif
2001   // Check if we have null pointers
2002   if (src == NULL || dest == NULL) {
2003     THROW(vmSymbols::java_lang_NullPointerException());
2004   }
2005   // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
2006   // even though the copy_array API also performs dynamic checks to ensure
2007   // that src and dest are truly arrays (and are conformable).
2008   // The copy_array mechanism is awkward and could be removed, but
2009   // the compilers don't call this function except as a last resort,
2010   // so it probably doesn't matter.


2239     double rest = sum;
2240     double percent = sum / 100;
2241     for (i = 0; i <= N; i++) {
2242       rest -= histo[i];
2243       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
2244     }
2245     tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
2246     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2247   }
2248 
2249   void print_histogram() {
2250     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2251     print_histogram_helper(_max_arity, _arity_histogram, "arity");
2252     tty->print_cr("\nSame for parameter size (in words):");
2253     print_histogram_helper(_max_size, _size_histogram, "size");
2254     tty->cr();
2255   }
2256 
2257  public:
2258   MethodArityHistogram() {
2259     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2260     _max_arity = _max_size = 0;
2261     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2262     CodeCache::nmethods_do(add_method_to_histogram);
2263     print_histogram();
2264   }
2265 };
2266 
2267 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2268 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2269 int MethodArityHistogram::_max_arity;
2270 int MethodArityHistogram::_max_size;
2271 
2272 void SharedRuntime::print_call_statistics(int comp_total) {
2273   tty->print_cr("Calls from compiled code:");
2274   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2275   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
2276   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
2277   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
2278   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2279   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));

