< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page




 225 
 226     if (owner == Self) {
 227       m->_recursions++;
 228       return true;
 229     }
 230 
 231     // This Java Monitor is inflated so obj's header will never be
 232     // displaced to this thread's BasicLock. Make the displaced header
 233     // non-NULL so this BasicLock is not seen as recursive nor as
 234     // being locked. We do this unconditionally so that this thread's
 235     // BasicLock cannot be mis-interpreted by any stack walkers. For
 236     // performance reasons, stack walkers generally first check for
 237     // Biased Locking in the object's header, the second check is for
 238     // stack-locking in the object's header, the third check is for
 239     // recursive stack-locking in the displaced header in the BasicLock,
 240     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 241     lock->set_displaced_header(markOopDesc::unused_mark());
 242 
 243     if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
 244       assert(m->_recursions == 0, "invariant");

 245       return true;
 246     }
 247   }
 248 
 249   // Note that we could inflate in quick_enter.
 250   // This is likely a useful optimization
 251   // Critically, in quick_enter() we must not:
 252   // -- perform bias revocation, or
 253   // -- block indefinitely, or
 254   // -- reach a safepoint
 255 
 256   return false;        // revert to slow-path
 257 }
 258 
 259 // -----------------------------------------------------------------------------
 260 //  Fast Monitor Enter/Exit
 261 // This is the fast monitor enter. The interpreter and compiler use
 262 // some assembly copies of this code. Make sure to update those copies
 263 // if the following function is changed. The implementation is
 264 // extremely sensitive to race conditions. Be careful.


1012   // More precisely, trigger an asynchronous STW safepoint as the number
1013   // of active monitors passes the specified threshold.
1014   // TODO: assert thread state is reasonable
1015 
1016   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
1017     // Induce a 'null' safepoint to scavenge monitors
1018     // The VM_Operation instance must be heap allocated as the op will be enqueued
1019     // and posted to the VMThread and have a lifespan longer than that of this
1020     // activation record. The VMThread will delete the op when completed.
1021     VMThread::execute(new VM_ScavengeMonitors());
1022   }
1023 }
1024 
1025 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
1026   // A large MAXPRIVATE value reduces both list lock contention
1027   // and list coherency traffic, but also tends to increase the
1028   // number of objectMonitors in circulation as well as the STW
1029   // scavenge costs.  As usual, we lean toward time in space-time
1030   // tradeoffs.
1031   const int MAXPRIVATE = 1024;
1032   stringStream ss;
1033   for (;;) {
1034     ObjectMonitor * m;
1035 
1036     // 1: try to allocate from the thread's local omFreeList.
1037     // Threads will attempt to allocate first from their local list, then
1038     // from the global list, and only after those attempts fail will the thread
1039     // attempt to instantiate new monitors.   Thread-local free lists take
1040     // heat off the gListLock and improve allocation latency, as well as reducing
1041     // coherency traffic on the shared global list.
1042     m = Self->omFreeList;
1043     if (m != NULL) {
1044       Self->omFreeList = m->FreeNext;
1045       Self->omFreeCount--;
1046       guarantee(m->object() == NULL, "invariant");
1047       m->FreeNext = Self->omInUseList;
1048       Self->omInUseList = m;
1049       Self->omInUseCount++;
1050       return m;
1051     }
1052 
1053     // 2: try to allocate from the global gFreeList
1054     // CONSIDER: use muxTry() instead of muxAcquire().
1055     // If the muxTry() fails then drop immediately into case 3.
1056     // If we're using thread-local free lists then try
1057     // to reprovision the caller's free list.
1058     if (gFreeList != NULL) {
1059       // Reprovision the thread's omFreeList.
1060       // Use bulk transfers to reduce the allocation rate and heat
1061       // on various locks.
1062       Thread::muxAcquire(&gListLock, "omAlloc(1)");
1063       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1064         gMonitorFreeCount--;
1065         ObjectMonitor * take = gFreeList;
1066         gFreeList = take->FreeNext;
1067         guarantee(take->object() == NULL, "invariant");

1068         take->Recycle();
1069         omRelease(Self, take, false);
1070       }
1071       Thread::muxRelease(&gListLock);
1072       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1073       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1074 
1075       const int mx = MonitorBound;
1076       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1077         // We can't safely induce a STW safepoint from omAlloc() as our thread
1078         // state may not be appropriate for such activities and callers may hold
1079         // naked oops, so instead we defer the action.
1080         InduceScavenge(Self, "omAlloc");
1081       }
1082       continue;
1083     }
1084 
1085     // 3: allocate a block of new ObjectMonitors
1086     // Both the local and global free lists are empty -- resort to malloc().
1087     // In the current implementation objectMonitors are TSM - immortal.


1149     gFreeList = temp + 1;
1150     Thread::muxRelease(&gListLock);
1151   }
1152 }
1153 
1154 // Place "m" on the caller's private per-thread omFreeList.
1155 // In practice there's no need to clamp or limit the number of
1156 // monitors on a thread's omFreeList as the only time we'll call
1157 // omRelease is to return a monitor to the free list after a CAS
1158 // attempt failed.  This doesn't allow unbounded #s of monitors to
1159 // accumulate on a thread's free list.
1160 //
1161 // Key constraint: all ObjectMonitors on a thread's free list and the global
1162 // free list must have their object field set to null. This prevents the
1163 // scavenger -- deflate_monitor_list() -- from reclaiming them.
1164 
1165 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1166                                    bool fromPerThreadAlloc) {
1167   guarantee(m->header() == NULL, "invariant");
1168   guarantee(m->object() == NULL, "invariant");
1169   stringStream ss;
1170   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1171             "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
1172             m->_recursions);
1173   // Remove from omInUseList
1174   if (fromPerThreadAlloc) {
1175     ObjectMonitor* cur_mid_in_use = NULL;
1176     bool extracted = false;
1177     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1178       if (m == mid) {
1179         // extract from per-thread in-use list
1180         if (mid == Self->omInUseList) {
1181           Self->omInUseList = mid->FreeNext;
1182         } else if (cur_mid_in_use != NULL) {
1183           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1184         }
1185         extracted = true;
1186         Self->omInUseCount--;
1187         break;
1188       }
1189     }
1190     assert(extracted, "Should have extracted from in-use list");
1191   }
1192 


1205 //
1206 // Also return the monitors of a moribund thread's omInUseList to
1207 // a global gOmInUseList under the global list lock so these
1208 // will continue to be scanned.
1209 //
1210 // We currently call omFlush() from Threads::remove() _before the thread
1211 // has been excised from the thread list and is no longer a mutator.
1212 // This means that omFlush() cannot run concurrently with a safepoint and
1213 // interleave with the deflate_idle_monitors scavenge operator. In particular,
1214 // this ensures that the thread's monitors are scanned by a GC safepoint,
1215 // either via Thread::oops_do() (if safepoint happens before omFlush()) or via
1216 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1217 // monitors have been transferred to the global in-use list).
1218 
1219 void ObjectSynchronizer::omFlush(Thread * Self) {
1220   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1221   ObjectMonitor * tail = NULL;
1222   int tally = 0;
1223   if (list != NULL) {
1224     ObjectMonitor * s;
1225     // The thread is going away. Set 'tail' to the last per-thread free
1226     // monitor which will be linked to gFreeList below under the gListLock.
1227     stringStream ss;

1228     for (s = list; s != NULL; s = s->FreeNext) {
1229       tally++;
1230       tail = s;
1231       guarantee(s->object() == NULL, "invariant");
1232       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));

1233     }
1234     guarantee(tail != NULL, "invariant");
1235     assert(Self->omFreeCount == tally, "free-count off");
1236     Self->omFreeList = NULL;
1237     Self->omFreeCount = 0;
1238   }
1239 
1240   ObjectMonitor * inUseList = Self->omInUseList;
1241   ObjectMonitor * inUseTail = NULL;
1242   int inUseTally = 0;
1243   if (inUseList != NULL) {
1244     ObjectMonitor *cur_om;
1245     // The thread is going away, however the omInUseList inflated
1246     // monitors may still be in-use by other threads.
1247     // Link them to inUseTail, which will be linked into the global in-use list
1248     // gOmInUseList below, under the gListLock
1249     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1250       inUseTail = cur_om;
1251       inUseTally++;
1252     }


1362     // the odds of inflation contention.
1363     //
1364     // We now use per-thread private objectmonitor free lists.
1365     // These lists are reprovisioned from the global free list outside the
1366     // critical INFLATING...ST interval.  A thread can transfer
1367     // multiple objectmonitors en masse from the global free list to its local free list.
1368     // This reduces coherency traffic and lock contention on the global free list.
1369     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1370     // before or after the CAS(INFLATING) operation.
1371     // See the comments in omAlloc().
1372 
1373     LogStreamHandle(Trace, monitorinflation) lsh;
1374 
1375     if (mark->has_locker()) {
1376       ObjectMonitor * m = omAlloc(Self);
1377       // Optimistically prepare the objectmonitor - anticipate successful CAS
1378       // We do this before the CAS in order to minimize the length of time
1379       // in which INFLATING appears in the mark.
1380       m->Recycle();
1381       m->_Responsible  = NULL;

1382       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1383 
1384       markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
1385       if (cmp != mark) {
1386         omRelease(Self, m, true);
1387         continue;       // Interference -- just retry
1388       }
1389 
1390       // We've successfully installed INFLATING (0) into the mark-word.
1391       // This is the only case where 0 will appear in a mark-word.
1392       // Only the singular thread that successfully swings the mark-word
1393       // to 0 can perform (or more precisely, complete) inflation.
1394       //
1395       // Why do we CAS a 0 into the mark-word instead of just CASing the
1396       // mark-word from the stack-locked value directly to the new inflated state?
1397       // Consider what happens when a thread unlocks a stack-locked object.
1398       // It attempts to use CAS to swing the displaced header value from the
1399       // on-stack basiclock back into the object header.  Recall also that the
1400       // header value (hash code, etc) can reside in (a) the object header, or
1401       // (b) a displaced header associated with the stack-lock, or (c) a displaced


1454       return m;
1455     }
1456 
1457     // CASE: neutral
1458     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1459     // If we know we're inflating for entry it's better to inflate by swinging a
1460     // pre-locked objectMonitor pointer into the object header.   A successful
1461     // CAS inflates the object *and* confers ownership to the inflating thread.
1462     // In the current implementation we use a 2-step mechanism where we CAS()
1463     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1464     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1465     // would be useful.
1466 
1467     // Catch if the object's header is not neutral (not locked and
1468     // not marked is what we care about here).
1469     assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
1470     ObjectMonitor * m = omAlloc(Self);
1471     // prepare m for installation - set monitor to initial state
1472     m->Recycle();
1473     m->set_header(mark);

1474     m->set_object(object);

1475     m->_Responsible  = NULL;
1476     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1477 
1478     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1479       m->set_header(NULL);
1480       m->set_object(NULL);
1481       m->Recycle();
1482       omRelease(Self, m, true);
1483       m = NULL;
1484       continue;
1485       // interference - the markword changed - just retry.
1486       // The state-transitions are one-way, so there's no chance of
1487       // live-lock -- "Inflated" is an absorbing state.
1488     }
1489 
1490     // Hopefully the performance counters are allocated on distinct
1491     // cache lines to avoid false sharing on MP systems ...
1492     OM_PERFDATA_OP(Inflations, inc());
1493     if (log_is_enabled(Trace, monitorinflation)) {
1494       ResourceMark rm(Self);


1905   } else {
1906     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
1907   }
1908 
1909   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
1910       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
1911     // When exiting this log output is at the Info level. When called
1912     // at a safepoint, this log output is at the Trace level since
1913     // there can be a lot of it.
1914     log_in_use_monitor_details(ls, on_exit);
1915   }
1916 
1917   ls->flush();
1918 
1919   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1920 }
1921 
1922 // Check a free monitor entry; log any errors.
1923 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
1924                                         outputStream * out, int *error_cnt_p) {
1925   stringStream ss;
1926   if (n->is_busy()) {
1927     if (jt != NULL) {
1928       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1929                     ": free per-thread monitor must not be busy: %s", p2i(jt),
1930                     p2i(n), n->is_busy_to_string(&ss));
1931     } else {
1932       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1933                     "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
1934     }
1935     *error_cnt_p = *error_cnt_p + 1;
1936   }
1937   if (n->header() != NULL) {
1938     if (jt != NULL) {
1939       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1940                     ": free per-thread monitor must have NULL _header "
1941                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
1942                     p2i(n->header()));
1943     } else {
1944       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1945                     "must have NULL _header field: _header=" INTPTR_FORMAT,
1946                     p2i(n), p2i(n->header()));
1947     }
1948     *error_cnt_p = *error_cnt_p + 1;
1949   }
1950   if (n->object() != NULL) {
1951     if (jt != NULL) {
1952       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1953                     ": free per-thread monitor must have NULL _object "


2092                   "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
2093                   chkOmInUseCount);
2094   } else {
2095     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
2096                   "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
2097                   chkOmInUseCount);
2098     *error_cnt_p = *error_cnt_p + 1;
2099   }
2100 }
2101 
2102 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2103 // flags indicate why the entry is in-use, 'object' and 'object type'
2104 // indicate the associated object and its type.
2105 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
2106                                                     bool on_exit) {
       // Print one row per in-use ObjectMonitor: first the global in-use
       // list (gOmInUseList), then each JavaThread's private omInUseList.
       // The 'BHL' columns show why an entry is considered in-use:
       //   B -> is_busy(), H -> header has a hash code, L -> owner is set.
2107   if (!on_exit) {
2108     // Not at VM exit so grab the global list lock.
       // NOTE(review): when on_exit is true the lock is deliberately skipped --
       // presumably callers guarantee no concurrent list mutation at VM exit;
       // confirm against the call sites.
2109     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
2110   }
2111 
       // 'ss' is reused for is_busy_to_string() output on every row;
       // ss.reset() below clears it after each use.
2112   stringStream ss;
2113   if (gOmInUseCount > 0) {
2114     out->print_cr("In-use global monitor info:");
2115     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2116     out->print_cr("%18s  %s  %18s  %18s",
2117                   "monitor", "BHL", "object", "object type");
2118     out->print_cr("==================  ===  ==================  ==================");
2119     for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
2120       const oop obj = (oop) n->object();
2121       const markOop mark = n->header();
2122       ResourceMark rm;
       // NOTE(review): mark->hash() and obj->klass() assume the header and
       // object fields are non-NULL for every entry on the in-use list --
       // verify a racing deflation cannot NULL them while we iterate.
2123       out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
2124                  n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
2125                  p2i(obj), obj->klass()->external_name());
2126       if (n->is_busy() != 0) {
2127         out->print(" (%s)", n->is_busy_to_string(&ss));
2128         ss.reset();
2129       }
2130       out->cr();
2131     }
2132   }
2133 
2134   if (!on_exit) {
2135     Thread::muxRelease(&gListLock);
2136   }
2137 
       // Per-thread lists are walked without holding gListLock; each thread is
       // reached via the JavaThreadIteratorWithHandle below.
2138   out->print_cr("In-use per-thread monitor info:");
2139   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2140   out->print_cr("%18s  %18s  %s  %18s  %18s",
2141                 "jt", "monitor", "BHL", "object", "object type");
2142   out->print_cr("==================  ==================  ===  ==================  ==================");
2143   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2144     for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
2145       const oop obj = (oop) n->object();
2146       const markOop mark = n->header();
2147       ResourceMark rm;
2148       out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
2149                  "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
2150                  mark->hash() != 0, n->owner() != NULL, p2i(obj),
2151                  obj->klass()->external_name());
2152       if (n->is_busy() != 0) {
2153         out->print(" (%s)", n->is_busy_to_string(&ss));
2154         ss.reset();
2155       }
2156       out->cr();
2157     }
2158   }
2159 
2160   out->flush();
2161 }
2162 
2163 // Log counts for the global and per-thread monitor lists and return
2164 // the population count.
2165 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2166   int popCount = 0;
2167   out->print_cr("%18s  %10s  %10s  %10s",
2168                 "Global Lists:", "InUse", "Free", "Total");
2169   out->print_cr("==================  ==========  ==========  ==========");
2170   out->print_cr("%18s  %10d  %10d  %10d", "",
2171                 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
2172   popCount += gOmInUseCount + gMonitorFreeCount;
2173 
2174   out->print_cr("%18s  %10s  %10s  %10s",
2175                 "Per-Thread Lists:", "InUse", "Free", "Provision");
2176   out->print_cr("==================  ==========  ==========  ==========");




 225 
 226     if (owner == Self) {
 227       m->_recursions++;
 228       return true;
 229     }
 230 
 231     // This Java Monitor is inflated so obj's header will never be
 232     // displaced to this thread's BasicLock. Make the displaced header
 233     // non-NULL so this BasicLock is not seen as recursive nor as
 234     // being locked. We do this unconditionally so that this thread's
 235     // BasicLock cannot be mis-interpreted by any stack walkers. For
 236     // performance reasons, stack walkers generally first check for
 237     // Biased Locking in the object's header, the second check is for
 238     // stack-locking in the object's header, the third check is for
 239     // recursive stack-locking in the displaced header in the BasicLock,
 240     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 241     lock->set_displaced_header(markOopDesc::unused_mark());
 242 
 243     if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
 244       assert(m->_recursions == 0, "invariant");
 245       assert(m->_owner == Self, "invariant");
 246       return true;
 247     }
 248   }
 249 
 250   // Note that we could inflate in quick_enter.
 251   // This is likely a useful optimization
 252   // Critically, in quick_enter() we must not:
 253   // -- perform bias revocation, or
 254   // -- block indefinitely, or
 255   // -- reach a safepoint
 256 
 257   return false;        // revert to slow-path
 258 }
 259 
 260 // -----------------------------------------------------------------------------
 261 //  Fast Monitor Enter/Exit
 262 // This is the fast monitor enter. The interpreter and compiler use
 263 // some assembly copies of this code. Make sure to update those copies
 264 // if the following function is changed. The implementation is
 265 // extremely sensitive to race conditions. Be careful.


1013   // More precisely, trigger an asynchronous STW safepoint as the number
1014   // of active monitors passes the specified threshold.
1015   // TODO: assert thread state is reasonable
1016 
1017   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
1018     // Induce a 'null' safepoint to scavenge monitors
1019     // The VM_Operation instance must be heap allocated as the op will be enqueued
1020     // and posted to the VMThread and have a lifespan longer than that of this
1021     // activation record. The VMThread will delete the op when completed.
1022     VMThread::execute(new VM_ScavengeMonitors());
1023   }
1024 }
1025 
1026 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
1027   // A large MAXPRIVATE value reduces both list lock contention
1028   // and list coherency traffic, but also tends to increase the
1029   // number of objectMonitors in circulation as well as the STW
1030   // scavenge costs.  As usual, we lean toward time in space-time
1031   // tradeoffs.
1032   const int MAXPRIVATE = 1024;

1033   for (;;) {
1034     ObjectMonitor * m;
1035 
1036     // 1: try to allocate from the thread's local omFreeList.
1037     // Threads will attempt to allocate first from their local list, then
1038     // from the global list, and only after those attempts fail will the thread
1039     // attempt to instantiate new monitors.   Thread-local free lists take
1040     // heat off the gListLock and improve allocation latency, as well as reducing
1041     // coherency traffic on the shared global list.
1042     m = Self->omFreeList;
1043     if (m != NULL) {
1044       Self->omFreeList = m->FreeNext;
1045       Self->omFreeCount--;
1046       guarantee(m->object() == NULL, "invariant");
1047       m->FreeNext = Self->omInUseList;
1048       Self->omInUseList = m;
1049       Self->omInUseCount++;
1050       return m;
1051     }
1052 
1053     // 2: try to allocate from the global gFreeList
1054     // CONSIDER: use muxTry() instead of muxAcquire().
1055     // If the muxTry() fails then drop immediately into case 3.
1056     // If we're using thread-local free lists then try
1057     // to reprovision the caller's free list.
1058     if (gFreeList != NULL) {
1059       // Reprovision the thread's omFreeList.
1060       // Use bulk transfers to reduce the allocation rate and heat
1061       // on various locks.
1062       Thread::muxAcquire(&gListLock, "omAlloc(1)");
1063       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1064         gMonitorFreeCount--;
1065         ObjectMonitor * take = gFreeList;
1066         gFreeList = take->FreeNext;
1067         guarantee(take->object() == NULL, "invariant");
1068         guarantee(!take->is_busy(), "invariant");
1069         take->Recycle();
1070         omRelease(Self, take, false);
1071       }
1072       Thread::muxRelease(&gListLock);
1073       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1074       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1075 
1076       const int mx = MonitorBound;
1077       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1078         // We can't safely induce a STW safepoint from omAlloc() as our thread
1079         // state may not be appropriate for such activities and callers may hold
1080         // naked oops, so instead we defer the action.
1081         InduceScavenge(Self, "omAlloc");
1082       }
1083       continue;
1084     }
1085 
1086     // 3: allocate a block of new ObjectMonitors
1087     // Both the local and global free lists are empty -- resort to malloc().
1088     // In the current implementation objectMonitors are TSM - immortal.


1150     gFreeList = temp + 1;
1151     Thread::muxRelease(&gListLock);
1152   }
1153 }
1154 
1155 // Place "m" on the caller's private per-thread omFreeList.
1156 // In practice there's no need to clamp or limit the number of
1157 // monitors on a thread's omFreeList as the only time we'll call
1158 // omRelease is to return a monitor to the free list after a CAS
1159 // attempt failed.  This doesn't allow unbounded #s of monitors to
1160 // accumulate on a thread's free list.
1161 //
1162 // Key constraint: all ObjectMonitors on a thread's free list and the global
1163 // free list must have their object field set to null. This prevents the
1164 // scavenger -- deflate_monitor_list() -- from reclaiming them.
1165 
1166 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1167                                    bool fromPerThreadAlloc) {
1168   guarantee(m->header() == NULL, "invariant");
1169   guarantee(m->object() == NULL, "invariant");
1170   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");



1171   // Remove from omInUseList
1172   if (fromPerThreadAlloc) {
1173     ObjectMonitor* cur_mid_in_use = NULL;
1174     bool extracted = false;
1175     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1176       if (m == mid) {
1177         // extract from per-thread in-use list
1178         if (mid == Self->omInUseList) {
1179           Self->omInUseList = mid->FreeNext;
1180         } else if (cur_mid_in_use != NULL) {
1181           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1182         }
1183         extracted = true;
1184         Self->omInUseCount--;
1185         break;
1186       }
1187     }
1188     assert(extracted, "Should have extracted from in-use list");
1189   }
1190 


1203 //
1204 // Also return the monitors of a moribund thread's omInUseList to
1205 // a global gOmInUseList under the global list lock so these
1206 // will continue to be scanned.
1207 //
1208 // We currently call omFlush() from Threads::remove() _before the thread
1209 // has been excised from the thread list and is no longer a mutator.
1210 // This means that omFlush() cannot run concurrently with a safepoint and
1211 // interleave with the deflate_idle_monitors scavenge operator. In particular,
1212 // this ensures that the thread's monitors are scanned by a GC safepoint,
1213 // either via Thread::oops_do() (if safepoint happens before omFlush()) or via
1214 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1215 // monitors have been transferred to the global in-use list).
1216 
1217 void ObjectSynchronizer::omFlush(Thread * Self) {
1218   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1219   ObjectMonitor * tail = NULL;
1220   int tally = 0;
1221   if (list != NULL) {
1222     ObjectMonitor * s;
1223     // The thread is going away, the per-thread free monitors
1224     // are freed via set_owner(NULL)
1225     // Link them to tail, which will be linked into the global free list
1226     // gFreeList below, under the gListLock
1227     for (s = list; s != NULL; s = s->FreeNext) {
1228       tally++;
1229       tail = s;
1230       guarantee(s->object() == NULL, "invariant");
1231       guarantee(!s->is_busy(), "invariant");
1232       s->set_owner(NULL);   // redundant but good hygiene
1233     }
1234     guarantee(tail != NULL, "invariant");
1235     assert(Self->omFreeCount == tally, "free-count off");
1236     Self->omFreeList = NULL;
1237     Self->omFreeCount = 0;
1238   }
1239 
1240   ObjectMonitor * inUseList = Self->omInUseList;
1241   ObjectMonitor * inUseTail = NULL;
1242   int inUseTally = 0;
1243   if (inUseList != NULL) {
1244     ObjectMonitor *cur_om;
1245     // The thread is going away, however the omInUseList inflated
1246     // monitors may still be in-use by other threads.
1247     // Link them to inUseTail, which will be linked into the global in-use list
1248     // gOmInUseList below, under the gListLock
1249     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1250       inUseTail = cur_om;
1251       inUseTally++;
1252     }


1362     // the odds of inflation contention.
1363     //
1364     // We now use per-thread private objectmonitor free lists.
1365     // These lists are reprovisioned from the global free list outside the
1366     // critical INFLATING...ST interval.  A thread can transfer
1367     // multiple objectmonitors en masse from the global free list to its local free list.
1368     // This reduces coherency traffic and lock contention on the global free list.
1369     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1370     // before or after the CAS(INFLATING) operation.
1371     // See the comments in omAlloc().
1372 
1373     LogStreamHandle(Trace, monitorinflation) lsh;
1374 
1375     if (mark->has_locker()) {
1376       ObjectMonitor * m = omAlloc(Self);
1377       // Optimistically prepare the objectmonitor - anticipate successful CAS
1378       // We do this before the CAS in order to minimize the length of time
1379       // in which INFLATING appears in the mark.
1380       m->Recycle();
1381       m->_Responsible  = NULL;
1382       m->_recursions   = 0;
1383       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1384 
1385       markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
1386       if (cmp != mark) {
1387         omRelease(Self, m, true);
1388         continue;       // Interference -- just retry
1389       }
1390 
1391       // We've successfully installed INFLATING (0) into the mark-word.
1392       // This is the only case where 0 will appear in a mark-word.
1393       // Only the singular thread that successfully swings the mark-word
1394       // to 0 can perform (or more precisely, complete) inflation.
1395       //
1396       // Why do we CAS a 0 into the mark-word instead of just CASing the
1397       // mark-word from the stack-locked value directly to the new inflated state?
1398       // Consider what happens when a thread unlocks a stack-locked object.
1399       // It attempts to use CAS to swing the displaced header value from the
1400       // on-stack basiclock back into the object header.  Recall also that the
1401       // header value (hash code, etc) can reside in (a) the object header, or
1402       // (b) a displaced header associated with the stack-lock, or (c) a displaced


1455       return m;
1456     }
1457 
1458     // CASE: neutral
1459     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1460     // If we know we're inflating for entry it's better to inflate by swinging a
1461     // pre-locked objectMonitor pointer into the object header.   A successful
1462     // CAS inflates the object *and* confers ownership to the inflating thread.
1463     // In the current implementation we use a 2-step mechanism where we CAS()
1464     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1465     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1466     // would be useful.
1467 
1468     // Catch if the object's header is not neutral (not locked and
1469     // not marked is what we care about here).
1470     assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
1471     ObjectMonitor * m = omAlloc(Self);
1472     // prepare m for installation - set monitor to initial state
1473     m->Recycle();
1474     m->set_header(mark);
1475     m->set_owner(NULL);
1476     m->set_object(object);
1477     m->_recursions   = 0;
1478     m->_Responsible  = NULL;
1479     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1480 
1481     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1482       m->set_header(NULL);
1483       m->set_object(NULL);
1484       m->Recycle();
1485       omRelease(Self, m, true);
1486       m = NULL;
1487       continue;
1488       // interference - the markword changed - just retry.
1489       // The state-transitions are one-way, so there's no chance of
1490       // live-lock -- "Inflated" is an absorbing state.
1491     }
1492 
1493     // Hopefully the performance counters are allocated on distinct
1494     // cache lines to avoid false sharing on MP systems ...
1495     OM_PERFDATA_OP(Inflations, inc());
1496     if (log_is_enabled(Trace, monitorinflation)) {
1497       ResourceMark rm(Self);


1908   } else {
1909     log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
1910   }
1911 
1912   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
1913       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
1914     // When exiting this log output is at the Info level. When called
1915     // at a safepoint, this log output is at the Trace level since
1916     // there can be a lot of it.
1917     log_in_use_monitor_details(ls, on_exit);
1918   }
1919 
1920   ls->flush();
1921 
1922   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1923 }
1924 
1925 // Check a free monitor entry; log any errors.
1926 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
1927                                         outputStream * out, int *error_cnt_p) {

1928   if (n->is_busy()) {
1929     if (jt != NULL) {
1930       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1931                     ": free per-thread monitor must not be busy.", p2i(jt),
1932                     p2i(n));
1933     } else {
1934       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1935                     "must not be busy.", p2i(n));
1936     }
1937     *error_cnt_p = *error_cnt_p + 1;
1938   }
1939   if (n->header() != NULL) {
1940     if (jt != NULL) {
1941       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1942                     ": free per-thread monitor must have NULL _header "
1943                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
1944                     p2i(n->header()));
1945     } else {
1946       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1947                     "must have NULL _header field: _header=" INTPTR_FORMAT,
1948                     p2i(n), p2i(n->header()));
1949     }
1950     *error_cnt_p = *error_cnt_p + 1;
1951   }
1952   if (n->object() != NULL) {
1953     if (jt != NULL) {
1954       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1955                     ": free per-thread monitor must have NULL _object "


2094                   "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
2095                   chkOmInUseCount);
2096   } else {
2097     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
2098                   "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
2099                   chkOmInUseCount);
2100     *error_cnt_p = *error_cnt_p + 1;
2101   }
2102 }
2103 
2104 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2105 // flags indicate why the entry is in-use, 'object' and 'object type'
2106 // indicate the associated object and its type.
2107 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
2108                                                     bool on_exit) {
2109   if (!on_exit) {
2110     // Not at VM exit so grab the global list lock.
2111     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
2112   }
2113 

2114   if (gOmInUseCount > 0) {
2115     out->print_cr("In-use global monitor info:");
2116     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2117     out->print_cr("%18s  %s  %18s  %18s",
2118                   "monitor", "BHL", "object", "object type");
2119     out->print_cr("==================  ===  ==================  ==================");
2120     for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
2121       const oop obj = (oop) n->object();
2122       const markOop mark = n->header();
2123       ResourceMark rm;
2124       out->print_cr(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
2125                     n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
2126                     p2i(obj), obj->klass()->external_name());





2127     }
2128   }
2129 
2130   if (!on_exit) {
2131     Thread::muxRelease(&gListLock);
2132   }
2133 
2134   out->print_cr("In-use per-thread monitor info:");
2135   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2136   out->print_cr("%18s  %18s  %s  %18s  %18s",
2137                 "jt", "monitor", "BHL", "object", "object type");
2138   out->print_cr("==================  ==================  ===  ==================  ==================");
2139   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2140     for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
2141       const oop obj = (oop) n->object();
2142       const markOop mark = n->header();
2143       ResourceMark rm;
2144       out->print_cr(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
2145                     "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
2146                     mark->hash() != 0, n->owner() != NULL, p2i(obj),
2147                     obj->klass()->external_name());





2148     }
2149   }
2150 
2151   out->flush();
2152 }
2153 
2154 // Log counts for the global and per-thread monitor lists and return
2155 // the population count.
2156 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2157   int popCount = 0;
2158   out->print_cr("%18s  %10s  %10s  %10s",
2159                 "Global Lists:", "InUse", "Free", "Total");
2160   out->print_cr("==================  ==========  ==========  ==========");
2161   out->print_cr("%18s  %10d  %10d  %10d", "",
2162                 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
2163   popCount += gOmInUseCount + gMonitorFreeCount;
2164 
2165   out->print_cr("%18s  %10s  %10s  %10s",
2166                 "Per-Thread Lists:", "InUse", "Free", "Provision");
2167   out->print_cr("==================  ==========  ==========  ==========");


< prev index next >