src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp (old version)

 607     }
 608     case method_entry: {
 609       THREAD->set_do_not_unlock();
 610 
 611       // Lock method if synchronized.
 612       if (METHOD->is_synchronized()) {
 613         // oop rcvr = locals[0].j.r;
 614         oop rcvr;
 615         if (METHOD->is_static()) {
 616           rcvr = METHOD->constants()->pool_holder()->java_mirror();
 617         } else {
 618           rcvr = LOCALS_OBJECT(0);
 619           VERIFY_OOP(rcvr);
 620         }
 621 
 622         // The initial monitor is ours for the taking.
 623         BasicObjectLock* mon = &istate->monitor_base()[-1];
 624         mon->set_obj(rcvr);
 625 
 626         // Traditional lightweight locking.
 627         markWord displaced = rcvr->mark().set_unlocked();
 628         mon->lock()->set_displaced_header(displaced);
 629         bool call_vm = UseHeavyMonitors;
 630         bool inc_monitor_count = true;
 631         if (call_vm || rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) {
 632           // Is it simple recursive case?
 633           if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
 634             mon->lock()->set_displaced_header(markWord::from_pointer(NULL));
 635           } else {
 636             inc_monitor_count = false;
 637             CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
 638           }
 639         }
 640         if (inc_monitor_count) {
 641           THREAD->inc_held_monitor_count();
 642         }
 643       }
 644       THREAD->clr_do_not_unlock();
 645 
 646       // Notify jvmti.
 647       // Whenever JVMTI puts a thread in interp_only_mode, method
 648       // entry/exit events are sent for that thread to track stack depth.
 649       if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
 650         CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
 651                 handle_exception);
 652       }
 653 
 654       goto run;
 655     }
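
The synchronized-method entry above (old code), and the got_monitors and _monitorenter cases further down, all use the same traditional lightweight-locking enter sequence: save an "unlocked" copy of the object's mark word in the BasicLock, try to CAS the mark to point at that lock slot, treat a CAS failure against a mark that already points into the current thread's stack as a recursive enter, and otherwise fall back to InterpreterRuntime::monitorenter. The following is a minimal standalone sketch of that protocol; the ModelObject/ModelBasicLock types, the constant names, and the two-low-bit encoding are illustrative assumptions, not HotSpot's real markWord/BasicObjectLock API.

#include <atomic>
#include <cstdint>

// Illustrative stand-ins for the object header and BasicLock (assumed layout).
struct ModelBasicLock { uintptr_t displaced_header; };
struct ModelObject    { std::atomic<uintptr_t> mark; };

constexpr uintptr_t kUnlockedBits = 0x1;  // assumed "unlocked" low-bit pattern
constexpr uintptr_t kLockBitsMask = 0x3;  // assumed low lock bits

// Returns true if the caller now holds the lightweight lock (possibly
// recursively); false means "go to the runtime", which is what the
// interpreter's CALL_VM(InterpreterRuntime::monitorenter(...)) does.
bool model_fast_enter(ModelObject* obj, ModelBasicLock* lock,
                      uintptr_t my_stack_low, uintptr_t my_stack_high) {
  // Save an unlocked copy of the current mark in the lock slot.
  uintptr_t displaced = obj->mark.load(std::memory_order_relaxed) | kUnlockedBits;
  lock->displaced_header = displaced;

  // Try to swing the mark word to point at our stack lock.
  uintptr_t expected = displaced;
  if (obj->mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(lock))) {
    return true;
  }

  // Recursive case: the mark already points into our own stack
  // (this models THREAD->is_lock_owned(...) in the code above).
  uintptr_t owner = displaced & ~kLockBitsMask;
  if (owner >= my_stack_low && owner < my_stack_high) {
    lock->displaced_header = 0;  // zero displaced header marks a recursive entry
    return true;
  }
  return false;  // contended or already inflated: take the slow path
}
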
 656 
 657     case popping_frame: {
 658       // returned from a java call to pop the frame, restart the call
 659       // clear the message so we don't confuse ourselves later
 660       assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
 661       istate->set_msg(no_request);
 662       THREAD->clr_pop_frame_in_process();

 706       if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
 707         // this will do the right thing even if an exception is pending.
 708         goto handle_return;
 709       }
 710       UPDATE_PC(Bytecodes::length_at(METHOD, pc));
 711       if (THREAD->has_pending_exception()) goto handle_exception;
 712       goto run;
 713     }
 714     case got_monitors: {
 715       // continue locking now that we have a monitor to use
 716       // we expect to find newly allocated monitor at the "top" of the monitor stack.
 717       oop lockee = STACK_OBJECT(-1);
 718       VERIFY_OOP(lockee);
 719       // dereferencing lockee ought to provoke an implicit null check
 720       // find a free monitor
 721       BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
 722       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
 723       entry->set_obj(lockee);
 724 
 725       // traditional lightweight locking
 726       markWord displaced = lockee->mark().set_unlocked();
 727       entry->lock()->set_displaced_header(displaced);
 728       bool call_vm = UseHeavyMonitors;
 729       bool inc_monitor_count = true;
 730       if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
 731         // Is it simple recursive case?
 732         if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
 733           entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
 734         } else {
 735           inc_monitor_count = false;
 736           CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
 737         }
 738       }
 739       if (inc_monitor_count) {
 740         THREAD->inc_held_monitor_count();
 741       }
 742       UPDATE_PC_AND_TOS(1, -1);
 743       goto run;
 744     }
 745     default: {
 746       fatal("Unexpected message from frame manager");
 747     }
 748   }
 749 
 750 run:
 751 
 752   DO_UPDATE_INSTRUCTION_COUNT(*pc)
 753   DEBUGGER_SINGLE_STEP_NOTIFY();
 754 #ifdef PREFETCH_OPCCODE
 755   opcode = *pc;  /* prefetch first opcode */
 756 #endif
 757 
 758 #ifndef USELABELS
 759   while (1)
 760 #endif
 761   {

1636 
1637       CASE(_monitorenter): {
1638         oop lockee = STACK_OBJECT(-1);
1639         // dereferencing lockee ought to provoke an implicit null check
1640         CHECK_NULL(lockee);
1641         // find a free monitor or one already allocated for this object
1642         // if we find a matching object then we need a new monitor
1643         // since this is recursive enter
1644         BasicObjectLock* limit = istate->monitor_base();
1645         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1646         BasicObjectLock* entry = NULL;
1647         while (most_recent != limit ) {
1648           if (most_recent->obj() == NULL) entry = most_recent;
1649           else if (most_recent->obj() == lockee) break;
1650           most_recent++;
1651         }
1652         if (entry != NULL) {
1653           entry->set_obj(lockee);
1654 
1655           // traditional lightweight locking
1656           markWord displaced = lockee->mark().set_unlocked();
1657           entry->lock()->set_displaced_header(displaced);
1658           bool call_vm = UseHeavyMonitors;
1659           bool inc_monitor_count = true;
1660           if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
1661             // Is it simple recursive case?
1662             if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
1663               entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
1664             } else {
1665               inc_monitor_count = false;
1666               CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1667             }
1668           }
1669           if (inc_monitor_count) {
1670             THREAD->inc_held_monitor_count();
1671           }
1672           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1673         } else {
1674           istate->set_msg(more_monitors);
1675           UPDATE_PC_AND_RETURN(0); // Re-execute
1676         }
1677       }
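
Before locking, _monitorenter above scans the frame's monitor area, which lies between istate->stack_base() (most recent slot) and istate->monitor_base() (oldest slot). The scan remembers a free slot and stops as soon as it sees a slot already holding the same object, since a recursive enter still needs a slot of its own; if no free slot was seen, the interpreter asks the frame manager for more monitors and re-executes the bytecode. A small sketch of just that scan, with a made-up Slot type standing in for BasicObjectLock:

// Illustrative stand-in for BasicObjectLock; only the object field matters here.
struct Slot { const void* obj; };

// Returns the slot to use for 'lockee', or nullptr when the caller must request
// more monitors (istate->set_msg(more_monitors) in the interpreter above).
Slot* model_find_monitor_slot(Slot* most_recent, Slot* limit, const void* lockee) {
  Slot* entry = nullptr;
  while (most_recent != limit) {
    if (most_recent->obj == nullptr) {
      entry = most_recent;                // remember a free slot (last one seen wins)
    } else if (most_recent->obj == lockee) {
      break;                              // recursive enter: stop scanning here
    }
    ++most_recent;
  }
  return entry;
}
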
1678 
1679       CASE(_monitorexit): {
1680         oop lockee = STACK_OBJECT(-1);
1681         CHECK_NULL(lockee);
1682         // dereferencing lockee ought to provoke an implicit null check
1683         // find our monitor slot
1684         BasicObjectLock* limit = istate->monitor_base();
1685         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1686         while (most_recent != limit ) {
1687           if ((most_recent)->obj() == lockee) {
1688             BasicLock* lock = most_recent->lock();
1689             markWord header = lock->displaced_header();
1690             most_recent->set_obj(NULL);
1691 
1692             // If it isn't recursive we either must swap old header or call the runtime
1693             bool dec_monitor_count = true;
1694             bool call_vm = UseHeavyMonitors;
1695             if (header.to_pointer() != NULL || call_vm) {
1696               markWord old_header = markWord::encode(lock);
1697               if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
1698                 // restore object for the slow case
1699                 most_recent->set_obj(lockee);
1700                 dec_monitor_count = false;
1701                 InterpreterRuntime::monitorexit(most_recent);
1702               }
1703             }
1704             if (dec_monitor_count) {
1705               THREAD->dec_held_monitor_count();
1706             }
1707             UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1708           }
1709           most_recent++;
1710         }
1711         // Need to throw illegal monitor state exception
1712         CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1713         ShouldNotReachHere();
1714       }
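
_monitorexit above undoes the enter protocol: a zero displaced header means the entry was recursive and nothing needs to be written back; otherwise the saved header is CASed back into the object's mark word, and if the mark no longer points at this lock slot (typically because the monitor was inflated in the meantime) the runtime finishes the unlock. A companion sketch to the enter model shown earlier, with the same illustrative types restated so it stands alone:

#include <atomic>
#include <cstdint>

struct ModelBasicLock { uintptr_t displaced_header; };
struct ModelObject    { std::atomic<uintptr_t> mark; };

// Returns true if the lightweight lock was released inline; false means the
// interpreter must call InterpreterRuntime::monitorexit to complete the unlock.
bool model_fast_exit(ModelObject* obj, ModelBasicLock* lock) {
  uintptr_t displaced = lock->displaced_header;
  if (displaced == 0) {
    return true;                          // recursive unlock: nothing to restore
  }
  // Swing the mark back to the saved header, but only if it still points at us.
  uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
  return obj->mark.compare_exchange_strong(expected, displaced);
}
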
1715 
1716       /* All of the non-quick opcodes. */
1717 
1718       /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1719        *  constant pool index in the instruction.
1720        */
1721       CASE(_getfield):
1722       CASE(_nofast_getfield):
1723       CASE(_getstatic):
1724         {
1725           u2 index;
1726           ConstantPoolCacheEntry* cache;

1984         //   - klass can be fastpath allocated (e.g. does not have finalizer)
1985         //   - TLAB accepts the allocation
1986         ConstantPool* constants = istate->method()->constants();
1987         if (UseTLAB && !constants->tag_at(index).is_unresolved_klass()) {
1988           Klass* entry = constants->resolved_klass_at(index);
1989           InstanceKlass* ik = InstanceKlass::cast(entry);
1990           if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
1991             size_t obj_size = ik->size_helper();
1992             HeapWord* result = THREAD->tlab().allocate(obj_size);
1993             if (result != NULL) {
1994               // Initialize object field block:
1995               //   - if TLAB is pre-zeroed, we can skip this path
1996               //   - in debug mode, ThreadLocalAllocBuffer::allocate mangles
1997               //     this area, and we still need to initialize it
1998               if (DEBUG_ONLY(true ||) !ZeroTLAB) {
1999                 size_t hdr_size = oopDesc::header_size();
2000                 Copy::fill_to_words(result + hdr_size, obj_size - hdr_size, 0);
2001               }
2002 
2003               // Initialize header, mirrors MemAllocator.
2004               oopDesc::set_mark(result, markWord::prototype());
2005               oopDesc::set_klass_gap(result, 0);
2006               oopDesc::release_set_klass(result, ik);
2007 
2008               oop obj = cast_to_oop(result);
2009 
2010               // Must prevent reordering of stores for object initialization
2011               // with stores that publish the new object.
2012               OrderAccess::storestore();
2013               SET_STACK_OBJECT(obj, 0);
2014               UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2015             }
2016           }
2017         }
2018         // Slow case allocation
2019         CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2020                 handle_exception);
2021         // Must prevent reordering of stores for object initialization
2022         // with stores that publish the new object.
2023         OrderAccess::storestore();
2024         SET_STACK_OBJECT(THREAD->vm_result(), 0);
2025         THREAD->set_vm_result(NULL);
2026         UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2027       }
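
The allocation fast path above goes straight to the thread-local allocation buffer when the klass is already resolved, initialized, and fast-path allocatable: bump the TLAB pointer, zero the field block past the header (unless the TLAB is pre-zeroed), write the header, and only then publish the new oop behind a storestore barrier. A minimal bump-pointer sketch of the TLAB step alone; the ModelTlab type is illustrative and leaves out the header and barrier details entirely:

#include <cstddef>
#include <cstring>

// Illustrative thread-local allocation buffer: a [top, end) region owned by a
// single thread, so allocation is an unsynchronized pointer bump.
struct ModelTlab {
  char* top;
  char* end;

  // Returns zeroed storage of 'size' bytes, or nullptr if the buffer is full,
  // in which case the interpreter above falls back to InterpreterRuntime::_new.
  void* allocate(std::size_t size) {
    if (static_cast<std::size_t>(end - top) < size) {
      return nullptr;
    }
    char* result = top;
    top += size;
    std::memset(result, 0, size);   // roughly mirrors the fill_to_words of the field block
    return result;
  }
};
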

3116       //
3117       // Another weird thing to watch for is if the method was locked
3118       // recursively and then not exited properly. This means we must
3119       // examine all the entries in reverse time (and stack) order and
3120       // unlock as we find them. If we find the method monitor before
3121       // we are at the initial entry then we should throw an exception.
3122       // It is not clear that the template-based interpreter does this
3123       // correctly.
3124 
3125       BasicObjectLock* base = istate->monitor_base();
3126       BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3127       bool method_unlock_needed = METHOD->is_synchronized();
3128       // We know the initial monitor was used for the method, so don't check that
3129       // slot in the loop.
3130       if (method_unlock_needed) base--;
3131 
3132       // Check all the monitors to see they are unlocked. Install exception if found to be locked.
3133       while (end < base) {
3134         oop lockee = end->obj();
3135         if (lockee != NULL) {
3136           BasicLock* lock = end->lock();
3137           markWord header = lock->displaced_header();
3138           end->set_obj(NULL);
3139 
3140           // If it isn't recursive we either must swap old header or call the runtime
3141           bool dec_monitor_count = true;
3142           if (header.to_pointer() != NULL) {
3143             markWord old_header = markWord::encode(lock);
3144             if (lockee->cas_set_mark(header, old_header) != old_header) {
3145               // restore object for the slow case
3146               end->set_obj(lockee);
3147               dec_monitor_count = false;
3148               InterpreterRuntime::monitorexit(end);
3149             }
3150           }
3151           if (dec_monitor_count) {
3152             THREAD->dec_held_monitor_count();
3153           }
3154 
3155           // One error is plenty
3156           if (illegal_state_oop() == NULL && !suppress_error) {
3157             {
3158               // Prevent any HandleMarkCleaner from freeing our live handles
3159               HandleMark __hm(THREAD);
3160               CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3161             }
3162             assert(THREAD->has_pending_exception(), "Lost our exception!");
3163             illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3164             THREAD->clear_pending_exception();
3165           }
3166         }
3167         end++;
3168       }
3169       // Unlock the method if needed
3170       if (method_unlock_needed) {
3171         if (base->obj() == NULL) {
3172           // The method is already unlocked; this is not good.
3173           if (illegal_state_oop() == NULL && !suppress_error) {

3180             illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3181             THREAD->clear_pending_exception();
3182           }
3183         } else {
3184           //
3185           // The initial monitor is always used for the method
3186           // However, if that slot no longer holds the oop for the method, it was unlocked
3187           // and then reused by something that wasn't unlocked!
3188           //
3189           // deopt can come in with rcvr dead because c2 knows
3190           // its value is preserved in the monitor. So we can't use locals[0] at all
3191           // and must use first monitor slot.
3192           //
3193           oop rcvr = base->obj();
3194           if (rcvr == NULL) {
3195             if (!suppress_error) {
3196               VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
3197               illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3198               THREAD->clear_pending_exception();
3199             }
3200           } else if (UseHeavyMonitors) {
3201             InterpreterRuntime::monitorexit(base);
3202             if (THREAD->has_pending_exception()) {
3203               if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3204               THREAD->clear_pending_exception();
3205             }
3206           } else {
3207             BasicLock* lock = base->lock();
3208             markWord header = lock->displaced_header();
3209             base->set_obj(NULL);
3210 
3211             // If it isn't recursive we either must swap old header or call the runtime
3212             bool dec_monitor_count = true;
3213             if (header.to_pointer() != NULL) {
3214               markWord old_header = markWord::encode(lock);
3215               if (rcvr->cas_set_mark(header, old_header) != old_header) {
3216                 // restore object for the slow case
3217                 base->set_obj(rcvr);
3218                 dec_monitor_count = false;
3219                 InterpreterRuntime::monitorexit(base);
3220                 if (THREAD->has_pending_exception()) {
3221                   if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3222                   THREAD->clear_pending_exception();
3223                 }
3224               }
3225             }
3226             if (dec_monitor_count) {
3227               THREAD->dec_held_monitor_count();
3228             }
3229           }
3230         }
3231       }
3232     }
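
At method return the block above sweeps every monitor slot between stack_base() and monitor_base(): any slot still holding an object is force-unlocked with the same displaced-header CAS as _monitorexit, the first such leftover lock installs one IllegalMonitorStateException, and a synchronized method then unlocks its reserved initial slot separately. A compact sketch of the sweep, again with illustrative model types and with the error handling reduced to a count:

#include <atomic>
#include <cstdint>

struct ModelBasicLock { uintptr_t displaced_header; };
struct ModelObject    { std::atomic<uintptr_t> mark; };
struct ModelSlot      { void* obj; ModelBasicLock lock; };  // stands in for BasicObjectLock

// Unlock every slot that is still in use when the frame unwinds. Returns how
// many slots were still locked; any non-zero result corresponds to the single
// IllegalMonitorStateException installed by the interpreter above.
int model_unwind_monitors(ModelSlot* end, ModelSlot* base) {
  int leftover = 0;
  for (ModelSlot* slot = end; slot < base; ++slot) {   // most recent slot first
    if (slot->obj == nullptr) {
      continue;                                        // already unlocked properly
    }
    ++leftover;
    ModelObject* obj = static_cast<ModelObject*>(slot->obj);
    uintptr_t displaced = slot->lock.displaced_header;
    slot->obj = nullptr;
    if (displaced != 0) {                              // not a recursive entry
      uintptr_t expected = reinterpret_cast<uintptr_t>(&slot->lock);
      if (!obj->mark.compare_exchange_strong(expected, displaced)) {
        slot->obj = obj;  // restore the slot; the interpreter then lets the runtime unlock it
      }
    }
  }
  return leftover;
}
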
3233     // Clear the do_not_unlock flag now.
3234     THREAD->clr_do_not_unlock();
3235 
3236     //
3237     // Notify jvmti/jvmdi
3238     //
3239     // NOTE: we do not notify a method_exit if we have a pending exception,
3240     // including an exception we generate for unlocking checks.  In the former
3241     // case, JVMDI has already been notified by our call for the exception handler
3242     // and in both cases as far as JVMDI is concerned we have already returned.
3243     // If we notify it again JVMDI will be all confused about how many frames
3244     // are still on the stack (4340444).
3245     //
3246     // NOTE Further! It turns out the JVMTI spec in fact expects to see
3247     // method_exit events whenever we leave an activation unless it was done
3248     // for popframe. This is nothing like jvmdi. However we are passing the

src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp (new version)

 607     }
 608     case method_entry: {
 609       THREAD->set_do_not_unlock();
 610 
 611       // Lock method if synchronized.
 612       if (METHOD->is_synchronized()) {
 613         // oop rcvr = locals[0].j.r;
 614         oop rcvr;
 615         if (METHOD->is_static()) {
 616           rcvr = METHOD->constants()->pool_holder()->java_mirror();
 617         } else {
 618           rcvr = LOCALS_OBJECT(0);
 619           VERIFY_OOP(rcvr);
 620         }
 621 
 622         // The initial monitor is ours for the taking.
 623         BasicObjectLock* mon = &istate->monitor_base()[-1];
 624         mon->set_obj(rcvr);
 625 
 626         // Traditional lightweight locking.
 627         CALL_VM(InterpreterRuntime::monitorenter(THREAD, rcvr), handle_exception);
 628       }
 629       THREAD->clr_do_not_unlock();
 630 
 631       // Notify jvmti.
 632       // Whenever JVMTI puts a thread in interp_only_mode, method
 633       // entry/exit events are sent for that thread to track stack depth.
 634       if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
 635         CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
 636                 handle_exception);
 637       }
 638 
 639       goto run;
 640     }
 641 
 642     case popping_frame: {
 643       // returned from a java call to pop the frame, restart the call
 644       // clear the message so we don't confuse ourselves later
 645       assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
 646       istate->set_msg(no_request);
 647       THREAD->clr_pop_frame_in_process();

 691       if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
 692         // this will do the right thing even if an exception is pending.
 693         goto handle_return;
 694       }
 695       UPDATE_PC(Bytecodes::length_at(METHOD, pc));
 696       if (THREAD->has_pending_exception()) goto handle_exception;
 697       goto run;
 698     }
 699     case got_monitors: {
 700       // continue locking now that we have a monitor to use
 701       // we expect to find newly allocated monitor at the "top" of the monitor stack.
 702       oop lockee = STACK_OBJECT(-1);
 703       VERIFY_OOP(lockee);
 704       // dereferencing lockee ought to provoke an implicit null check
 705       // find a free monitor
 706       BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
 707       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
 708       entry->set_obj(lockee);
 709 
 710       // traditional lightweight locking
 711       CALL_VM(InterpreterRuntime::monitorenter(THREAD, lockee), handle_exception);
 712       UPDATE_PC_AND_TOS(1, -1);
 713       goto run;
 714     }
 715     default: {
 716       fatal("Unexpected message from frame manager");
 717     }
 718   }
 719 
 720 run:
 721 
 722   DO_UPDATE_INSTRUCTION_COUNT(*pc)
 723   DEBUGGER_SINGLE_STEP_NOTIFY();
 724 #ifdef PREFETCH_OPCCODE
 725   opcode = *pc;  /* prefetch first opcode */
 726 #endif
 727 
 728 #ifndef USELABELS
 729   while (1)
 730 #endif
 731   {

1606 
1607       CASE(_monitorenter): {
1608         oop lockee = STACK_OBJECT(-1);
1609         // dereferencing lockee ought to provoke an implicit null check
1610         CHECK_NULL(lockee);
1611         // find a free monitor or one already allocated for this object
1612         // if we find a matching object then we need a new monitor
1613         // since this is recursive enter
1614         BasicObjectLock* limit = istate->monitor_base();
1615         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1616         BasicObjectLock* entry = NULL;
1617         while (most_recent != limit ) {
1618           if (most_recent->obj() == NULL) entry = most_recent;
1619           else if (most_recent->obj() == lockee) break;
1620           most_recent++;
1621         }
1622         if (entry != NULL) {
1623           entry->set_obj(lockee);
1624 
1625           // traditional lightweight locking
1626           CALL_VM(InterpreterRuntime::monitorenter(THREAD, lockee), handle_exception);
1627           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1628         } else {
1629           istate->set_msg(more_monitors);
1630           UPDATE_PC_AND_RETURN(0); // Re-execute
1631         }
1632       }
1633 
1634       CASE(_monitorexit): {
1635         oop lockee = STACK_OBJECT(-1);
1636         CHECK_NULL(lockee);
1637         // dereferencing lockee ought to provoke an implicit null check
1638         // find our monitor slot
1639         BasicObjectLock* limit = istate->monitor_base();
1640         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1641         while (most_recent != limit ) {
1642           if ((most_recent)->obj() == lockee) {
1643             most_recent->set_obj(NULL);
1644             InterpreterRuntime::monitorexit(lockee);
1645             UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1646           }
1647           most_recent++;
1648         }
1649         // Need to throw illegal monitor state exception
1650         CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1651         ShouldNotReachHere();
1652       }
1653 
1654       /* All of the non-quick opcodes. */
1655 
1656       /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1657        *  constant pool index in the instruction.
1658        */
1659       CASE(_getfield):
1660       CASE(_nofast_getfield):
1661       CASE(_getstatic):
1662         {
1663           u2 index;
1664           ConstantPoolCacheEntry* cache;

1922         //   - klass can be fastpath allocated (e.g. does not have finalizer)
1923         //   - TLAB accepts the allocation
1924         ConstantPool* constants = istate->method()->constants();
1925         if (UseTLAB && !constants->tag_at(index).is_unresolved_klass()) {
1926           Klass* entry = constants->resolved_klass_at(index);
1927           InstanceKlass* ik = InstanceKlass::cast(entry);
1928           if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
1929             size_t obj_size = ik->size_helper();
1930             HeapWord* result = THREAD->tlab().allocate(obj_size);
1931             if (result != NULL) {
1932               // Initialize object field block:
1933               //   - if TLAB is pre-zeroed, we can skip this path
1934               //   - in debug mode, ThreadLocalAllocBuffer::allocate mangles
1935               //     this area, and we still need to initialize it
1936               if (DEBUG_ONLY(true ||) !ZeroTLAB) {
1937                 size_t hdr_size = oopDesc::header_size();
1938                 Copy::fill_to_words(result + hdr_size, obj_size - hdr_size, 0);
1939               }
1940 
1941               // Initialize header, mirrors MemAllocator.
1942 #ifdef _LP64
1943               oopDesc::release_set_mark(result, ik->prototype_header());
1944 #else
1945               oopDesc::set_mark(result, markWord::prototype());
1946               oopDesc::release_set_klass(result, ik);
1947 #endif
1948               oop obj = cast_to_oop(result);
1949 
1950               // Must prevent reordering of stores for object initialization
1951               // with stores that publish the new object.
1952               OrderAccess::storestore();
1953               SET_STACK_OBJECT(obj, 0);
1954               UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
1955             }
1956           }
1957         }
1958         // Slow case allocation
1959         CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
1960                 handle_exception);
1961         // Must prevent reordering of stores for object initialization
1962         // with stores that publish the new object.
1963         OrderAccess::storestore();
1964         SET_STACK_OBJECT(THREAD->vm_result(), 0);
1965         THREAD->set_vm_result(NULL);
1966         UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
1967       }
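
Compared with the old allocation path, the new code above changes only how the object header is initialized: on _LP64 a single releasing store of ik->prototype_header() publishes the header (and the separate klass-gap and klass stores disappear), while the 32-bit path keeps the two-step scheme of writing a neutral mark and then release-publishing the klass. The sketch below models that difference with made-up types; the idea that the 64-bit prototype header already carries the klass information is an assumption made for the sketch, not something the diff states.

#include <atomic>
#include <cstdint>

struct ModelKlass {
  uintptr_t prototype_header;      // assumed to encode klass info on the 64-bit path
};

struct ModelHeader64 {
  std::atomic<uintptr_t> mark;     // the mark word is the whole header
};

struct ModelHeader32 {
  std::atomic<uintptr_t> mark;
  std::atomic<ModelKlass*> klass;  // separate klass field, published last
};

// 64-bit shape: one releasing store, mirroring oopDesc::release_set_mark above.
void model_init_header64(ModelHeader64* h, ModelKlass* ik) {
  h->mark.store(ik->prototype_header, std::memory_order_release);
}

// 32-bit shape: neutral mark first, then release-publish the klass,
// mirroring oopDesc::set_mark + oopDesc::release_set_klass above.
void model_init_header32(ModelHeader32* h, ModelKlass* ik, uintptr_t neutral_mark) {
  h->mark.store(neutral_mark, std::memory_order_relaxed);
  h->klass.store(ik, std::memory_order_release);
}
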

3056       //
3057       // Another weird thing to watch for is if the method was locked
3058       // recursively and then not exited properly. This means we must
3059       // examine all the entries in reverse time (and stack) order and
3060       // unlock as we find them. If we find the method monitor before
3061       // we are at the initial entry then we should throw an exception.
3062       // It is not clear that the template-based interpreter does this
3063       // correctly.
3064 
3065       BasicObjectLock* base = istate->monitor_base();
3066       BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3067       bool method_unlock_needed = METHOD->is_synchronized();
3068       // We know the initial monitor was used for the method, so don't check that
3069       // slot in the loop.
3070       if (method_unlock_needed) base--;
3071 
3072       // Check all the monitors to see they are unlocked. Install exception if found to be locked.
3073       while (end < base) {
3074         oop lockee = end->obj();
3075         if (lockee != NULL) {
3076           end->set_obj(NULL);
3077           InterpreterRuntime::monitorexit(lockee);
3078 
3079           // One error is plenty
3080           if (illegal_state_oop() == NULL && !suppress_error) {
3081             {
3082               // Prevent any HandleMarkCleaner from freeing our live handles
3083               HandleMark __hm(THREAD);
3084               CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3085             }
3086             assert(THREAD->has_pending_exception(), "Lost our exception!");
3087             illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3088             THREAD->clear_pending_exception();
3089           }
3090         }
3091         end++;
3092       }
3093       // Unlock the method if needed
3094       if (method_unlock_needed) {
3095         if (base->obj() == NULL) {
3096           // The method is already unlocked; this is not good.
3097           if (illegal_state_oop() == NULL && !suppress_error) {

3104             illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3105             THREAD->clear_pending_exception();
3106           }
3107         } else {
3108           //
3109           // The initial monitor is always used for the method
3110           // However, if that slot no longer holds the oop for the method, it was unlocked
3111           // and then reused by something that wasn't unlocked!
3112           //
3113           // deopt can come in with rcvr dead because c2 knows
3114           // its value is preserved in the monitor. So we can't use locals[0] at all
3115           // and must use first monitor slot.
3116           //
3117           oop rcvr = base->obj();
3118           if (rcvr == NULL) {
3119             if (!suppress_error) {
3120               VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
3121               illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3122               THREAD->clear_pending_exception();
3123             }
3124           } else {
3125             InterpreterRuntime::monitorexit(rcvr);
3126             if (THREAD->has_pending_exception()) {
3127               if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3128               THREAD->clear_pending_exception();
3129             }
3130           }
3131         }
3132       }
3133     }
3134     // Clear the do_not_unlock flag now.
3135     THREAD->clr_do_not_unlock();
3136 
3137     //
3138     // Notify jvmti/jvmdi
3139     //
3140     // NOTE: we do not notify a method_exit if we have a pending exception,
3141     // including an exception we generate for unlocking checks.  In the former
3142     // case, JVMDI has already been notified by our call for the exception handler
3143     // and in both cases as far as JVMDI is concerned we have already returned.
3144     // If we notify it again JVMDI will be all confused about how many frames
3145     // are still on the stack (4340444).
3146     //
3147     // NOTE Further! It turns out the JVMTI spec in fact expects to see
3148     // method_exit events whenever we leave an activation unless it was done
3149     // for popframe. This is nothing like jvmdi. However we are passing the