      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}
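
// A minimal sketch of the spin-then-retry shape used in the loop above,
// assuming a plain std::atomic lock word and fixed spin counts (the real
// code sizes its counts adaptively and also checks for pending safepoints).
// spin_enter_sketch and its lock word are hypothetical illustrations, not
// part of HotSpot.
#include <atomic>

static bool spin_enter_sketch(std::atomic<int>& lock_word) {
  const int outer_spin_count = 4;   // assumed bound on retry rounds
  const int inner_spin_count = 32;  // assumed pause count per round
  for (int outer = 0; outer < outer_spin_count; outer++) {
    for (int inner = 1; inner < inner_spin_count; inner++) {
      // Stand-in for SpinPause(): back off before retrying the CAS.
    }
    int unlocked = 0;
    if (lock_word.compare_exchange_strong(unlocked, 1)) {
      return true;  // acquired, analogous to fast_lock_try_enter() succeeding
    }
  }
  return false;  // caller falls back to the slow path
}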

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    // enter_for is assumed to be called only when there is no contention on the object.
    monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}
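
// Minimal model (hypothetical; not HotSpot's ObjectMonitor) of why the enter
// after inflate_fast_locked_object() above is asserted to succeed: inflation
// transfers fast-lock ownership into the monitor, so the owner's subsequent
// enter is a recursive enter and can never contend or block.
struct MonitorModel {
  void* owner = nullptr;
  int   recursions = 0;
  bool enter(void* thread) {
    if (owner == nullptr) { owner = thread; return true; }  // uncontended
    if (owner == thread)  { recursions++;   return true; }  // recursive
    return false;                                           // contended: would block
  }
};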

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
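
  // Note on SpinYield(0, 2) above: SpinYield's constructor takes
  // (spin_limit, yield_limit), so this instance never busy-spins, yields at
  // most twice, and thereafter sleeps on every wait() call. The retry loop
  // that uses it is elided from this excerpt; its assumed shape is:
  //
  //   while (true) {
  //     if (observed_deflation) {
  //       spin_yield.wait();  // yield twice, then sleep, per the limits above
  //     }
  //     // ... retry fast-locking / inflation; set observed_deflation when a
  //     // monitor is seen mid-deflation ...
  //   }
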
bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  // If quick_enter succeeds in entering, the cache should be in a valid initialized state.
  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32-bit platforms, which have limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    current->inc_held_monitor_count();
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      current->inc_held_monitor_count();
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Slow-path.
  return false;
}
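
// Illustrative sketch (monitor_enter_sketch is a hypothetical caller, not
// HotSpot's real entry point): quick_enter() is the no-safepoint fast path,
// tried while the thread is still _thread_in_Java; only when it returns
// false does the caller fall back to the enter() slow path, which may
// inflate, safepoint, and block.
static void monitor_enter_sketch(oop obj, BasicLock* lock, JavaThread* current) {
  if (LightweightSynchronizer::quick_enter(obj, lock, current)) {
    return;  // entered without a safepoint transition
  }
  // Slow path may safepoint and block; keep obj alive and movable via a Handle.
  Handle h_obj(current, obj);
  LightweightSynchronizer::enter(h_obj, lock, current);
}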