1 /*
2 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/cdsConfig.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "interpreter/bytecodeHistogram.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "interpreter/zero/bytecodeInterpreter.inline.hpp"
35 #include "jvm_io.h"
36 #include "logging/log.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "memory/universe.hpp"
39 #include "oops/arrayOop.inline.hpp"
40 #include "oops/constantPool.inline.hpp"
41 #include "oops/cpCache.inline.hpp"
42 #include "oops/instanceKlass.inline.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/method.inline.hpp"
45 #include "oops/methodCounters.hpp"
46 #include "oops/objArrayKlass.hpp"
47 #include "oops/objArrayOop.inline.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "oops/resolvedFieldEntry.hpp"
50 #include "oops/resolvedIndyEntry.hpp"
51 #include "oops/resolvedMethodEntry.hpp"
52 #include "oops/typeArrayOop.inline.hpp"
53 #include "prims/jvmtiExport.hpp"
54 #include "prims/jvmtiThreadState.hpp"
55 #include "runtime/atomicAccess.hpp"
56 #include "runtime/basicLock.inline.hpp"
57 #include "runtime/frame.inline.hpp"
58 #include "runtime/globals.hpp"
59 #include "runtime/handles.inline.hpp"
60 #include "runtime/interfaceSupport.inline.hpp"
61 #include "runtime/orderAccess.hpp"
62 #include "runtime/sharedRuntime.hpp"
63 #include "utilities/debug.hpp"
64 #include "utilities/exceptions.hpp"
65 #include "utilities/globalDefinitions.hpp"
66 #include "utilities/macros.hpp"
67
68 /*
69 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
71 * gives us the opportunity to have the instructions that calculate the
72 * next opcode to jump to be intermixed with the rest of the instructions
73 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
74 */
75 #undef USELABELS
76 #ifdef __GNUC__
77 /*
78 ASSERT signifies debugging. It is much easier to step thru bytecodes if we
79 don't use the computed goto approach.
80 */
81 #ifndef ASSERT
82 #define USELABELS
83 #endif
84 #endif
85
86 #undef CASE
87 #ifdef USELABELS
88 #define CASE(opcode) opc ## opcode
89 #define DEFAULT opc_default
90 #else
91 #define CASE(opcode) case Bytecodes:: opcode
92 #define DEFAULT default
93 #endif
94
95 /*
96 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
98 * the top of the while loop handle it. This provides a better opportunity
99 * for instruction scheduling. Some compilers just do this prefetch
100 * automatically. Some actually end up with worse performance if you
101 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
102 */
103 #undef PREFETCH_OPCCODE
104 #define PREFETCH_OPCCODE
105
// Deliberately-empty VM entry point used as an interpreter safepoint.
// The useful work happens in the JRT_ENTRY transition wrapper, which
// moves the thread in/out of the VM state — presumably where pending
// safepoint/handshake requests get processed (see RETURN_SAFEPOINT below).
JRT_ENTRY(void, at_safepoint(JavaThread* current)) {}
JRT_END
108
109 /*
110 Interpreter safepoint: it is expected that the interpreter will have no live
111 handles of its own creation live at an interpreter safepoint. Therefore we
112 run a HandleMarkCleaner and trash all handles allocated in the call chain
113 since the JavaCalls::call_helper invocation that initiated the chain.
114 There really shouldn't be any handles remaining to trash but this is cheap
115 in relation to a safepoint.
116 */
117 #define RETURN_SAFEPOINT \
118 if (SafepointMechanism::should_process(THREAD)) { \
119 CALL_VM(at_safepoint(THREAD), handle_exception); \
120 }
121
122 /*
123 * VM_JAVA_ERROR - Macro for throwing a java exception from
124 * the interpreter loop. Should really be a CALL_VM but there
125 * is no entry point to do the transition to vm so we just
126 * do it by hand here.
127 */
128 #define VM_JAVA_ERROR_NO_JUMP(name, msg) \
129 DECACHE_STATE(); \
130 SET_LAST_JAVA_FRAME(); \
131 { \
132 ThreadInVMfromJava trans(THREAD); \
133 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
134 } \
135 RESET_LAST_JAVA_FRAME(); \
136 CACHE_STATE();
137
138 // Normal throw of a java error.
139 #define VM_JAVA_ERROR(name, msg) \
140 VM_JAVA_ERROR_NO_JUMP(name, msg) \
141 goto handle_exception;
142
143 #ifdef PRODUCT
144 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
145 #else
146 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
147 { \
148 if (PrintBytecodeHistogram) { \
149 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
150 } \
151 if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) { \
152 BytecodeCounter::_counter_value++; \
153 if (StopInterpreterAt == BytecodeCounter::_counter_value) { \
154 os::breakpoint(); \
155 } \
156 if (TraceBytecodes) { \
157 CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \
158 topOfStack[Interpreter::expr_index_at(1)], \
159 topOfStack[Interpreter::expr_index_at(2)]), \
160 handle_exception); \
161 } \
162 } \
163 }
164 #endif
165
166 #undef DEBUGGER_SINGLE_STEP_NOTIFY
167 #if INCLUDE_JVMTI
168 /* NOTE: (kbr) This macro must be called AFTER the PC has been
169 incremented. JvmtiExport::at_single_stepping_point() may cause a
170 breakpoint opcode to get inserted at the current PC to allow the
171 debugger to coalesce single-step events.
172
173 As a result if we call at_single_stepping_point() we refetch opcode
174 to get the current opcode. This will override any other prefetching
175 that might have occurred.
176 */
177 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
178 { \
179 if (JVMTI_ENABLED && JvmtiExport::should_post_single_step()) { \
180 DECACHE_STATE(); \
181 SET_LAST_JAVA_FRAME(); \
182 ThreadInVMfromJava trans(THREAD); \
183 JvmtiExport::at_single_stepping_point(THREAD, \
184 istate->method(), \
185 pc); \
186 RESET_LAST_JAVA_FRAME(); \
187 CACHE_STATE(); \
188 if (THREAD->has_pending_popframe() && \
189 !THREAD->pop_frame_in_process()) { \
190 goto handle_Pop_Frame; \
191 } \
192 if (THREAD->jvmti_thread_state() && \
193 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
194 goto handle_Early_Return; \
195 } \
196 opcode = *pc; \
197 } \
198 }
199 #else
200 #define DEBUGGER_SINGLE_STEP_NOTIFY()
201 #endif // INCLUDE_JVMTI
202
203 /*
204 * CONTINUE - Macro for executing the next opcode.
205 */
206 #undef CONTINUE
207 #ifdef USELABELS
208 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
210 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
211 #define CONTINUE { \
212 opcode = *pc; \
213 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
214 DEBUGGER_SINGLE_STEP_NOTIFY(); \
215 DISPATCH(opcode); \
216 }
217 #else
218 #ifdef PREFETCH_OPCCODE
219 #define CONTINUE { \
220 opcode = *pc; \
221 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
222 DEBUGGER_SINGLE_STEP_NOTIFY(); \
223 continue; \
224 }
225 #else
226 #define CONTINUE { \
227 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
228 DEBUGGER_SINGLE_STEP_NOTIFY(); \
229 continue; \
230 }
231 #endif
232 #endif
233
234
235 #define UPDATE_PC(opsize) {pc += opsize; }
236 /*
237 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
238 */
239 #undef UPDATE_PC_AND_TOS
240 #define UPDATE_PC_AND_TOS(opsize, stack) \
241 {pc += opsize; MORE_STACK(stack); }
242
243 /*
244 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
245 * and executing the next opcode. It's somewhat similar to the combination
246 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
247 */
248 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
249 #ifdef USELABELS
250 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
251 pc += opsize; opcode = *pc; MORE_STACK(stack); \
252 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
253 DEBUGGER_SINGLE_STEP_NOTIFY(); \
254 DISPATCH(opcode); \
255 }
256
257 #define UPDATE_PC_AND_CONTINUE(opsize) { \
258 pc += opsize; opcode = *pc; \
259 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
260 DEBUGGER_SINGLE_STEP_NOTIFY(); \
261 DISPATCH(opcode); \
262 }
263 #else
264 #ifdef PREFETCH_OPCCODE
265 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
266 pc += opsize; opcode = *pc; MORE_STACK(stack); \
267 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
268 DEBUGGER_SINGLE_STEP_NOTIFY(); \
269 goto do_continue; \
270 }
271
272 #define UPDATE_PC_AND_CONTINUE(opsize) { \
273 pc += opsize; opcode = *pc; \
274 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
275 DEBUGGER_SINGLE_STEP_NOTIFY(); \
276 goto do_continue; \
277 }
278 #else
279 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
280 pc += opsize; MORE_STACK(stack); \
281 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
282 DEBUGGER_SINGLE_STEP_NOTIFY(); \
283 goto do_continue; \
284 }
285
286 #define UPDATE_PC_AND_CONTINUE(opsize) { \
287 pc += opsize; \
288 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
289 DEBUGGER_SINGLE_STEP_NOTIFY(); \
290 goto do_continue; \
291 }
292 #endif /* PREFETCH_OPCCODE */
293 #endif /* USELABELS */
294
// About to call a new method: save the adjusted pc and return to the frame manager
296 #define UPDATE_PC_AND_RETURN(opsize) \
297 DECACHE_TOS(); \
298 istate->set_bcp(pc+opsize); \
299 return;
300
301 #define REWRITE_AT_PC(val) \
302 *pc = val;
303
304 #define METHOD istate->method()
305 #define GET_METHOD_COUNTERS(res)
306 #define DO_BACKEDGE_CHECKS(skip, branch_pc)
307
308 /*
309 * For those opcodes that need to have a GC point on a backwards branch
310 */
311
312 /*
313 * Macros for caching and flushing the interpreter state. Some local
314 * variables need to be flushed out to the frame before we do certain
315 * things (like pushing frames or becoming gc safe) and some need to
316 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
318 * optimal because we don't always need to cache or decache everything
319 * because some things we know are already cached or decached.
320 */
321 #undef DECACHE_TOS
322 #undef CACHE_TOS
323 #undef CACHE_PREV_TOS
324 #define DECACHE_TOS() istate->set_stack(topOfStack);
325
326 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
327
328 #undef DECACHE_PC
329 #undef CACHE_PC
330 #define DECACHE_PC() istate->set_bcp(pc);
331 #define CACHE_PC() pc = istate->bcp();
332 #define CACHE_CP() cp = istate->constants();
333 #define CACHE_LOCALS() locals = istate->locals();
334 #undef CACHE_FRAME
335 #define CACHE_FRAME()
336
337 // BCI() returns the current bytecode-index.
338 #undef BCI
339 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
340
341 /*
342 * CHECK_NULL - Macro for throwing a NullPointerException if the object
343 * passed is a null ref.
344 * On some architectures/platforms it should be possible to do this implicitly
345 */
346 #undef CHECK_NULL
347 #define CHECK_NULL_MSG(obj_, msg) \
348 if ((obj_) == nullptr) { \
349 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), (msg)); \
350 } \
351 VERIFY_OOP(obj_)
352 #define CHECK_NULL(obj_) CHECK_NULL_MSG(obj_, nullptr)
353
354 #define VMdoubleConstZero() 0.0
355 #define VMdoubleConstOne() 1.0
356 #define VMlongConstZero() (max_jlong-max_jlong)
357 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
358
359 /*
360 * Alignment
361 */
362 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
363
364 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
365 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
366
367 // Reload interpreter state after calling the VM or a possible GC
368 #define CACHE_STATE() \
369 CACHE_TOS(); \
370 CACHE_PC(); \
371 CACHE_CP(); \
372 CACHE_LOCALS();
373
374 // Call the VM with last java frame only.
375 #define CALL_VM_NAKED_LJF(func) \
376 DECACHE_STATE(); \
377 SET_LAST_JAVA_FRAME(); \
378 func; \
379 RESET_LAST_JAVA_FRAME(); \
380 CACHE_STATE();
381
382 // Call the VM. Don't check for pending exceptions.
383 #define CALL_VM_NOCHECK(func) \
384 CALL_VM_NAKED_LJF(func) \
385 if (THREAD->has_pending_popframe() && \
386 !THREAD->pop_frame_in_process()) { \
387 goto handle_Pop_Frame; \
388 } \
389 if (THREAD->jvmti_thread_state() && \
390 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
391 goto handle_Early_Return; \
392 }
393
394 // Call the VM and check for pending exceptions
395 #define CALL_VM(func, label) { \
396 CALL_VM_NOCHECK(func); \
397 if (THREAD->has_pending_exception()) goto label; \
398 }
399
400 #define MAYBE_POST_FIELD_ACCESS(obj) { \
401 if (JVMTI_ENABLED) { \
402 int* count_addr; \
403 /* Check to see if a field modification watch has been set */ \
404 /* before we take the time to call into the VM. */ \
405 count_addr = (int*)JvmtiExport::get_field_access_count_addr(); \
406 if (*count_addr > 0) { \
407 oop target; \
408 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { \
409 target = nullptr; \
410 } else { \
411 target = obj; \
412 } \
413 CALL_VM(InterpreterRuntime::post_field_access(THREAD, \
414 target, entry), \
415 handle_exception); \
416 } \
417 } \
418 }
419
420 #define MAYBE_POST_FIELD_MODIFICATION(obj) { \
421 if (JVMTI_ENABLED) { \
422 int* count_addr; \
423 /* Check to see if a field modification watch has been set */ \
424 /* before we take the time to call into the VM. */ \
425 count_addr = (int*)JvmtiExport::get_field_modification_count_addr(); \
426 if (*count_addr > 0) { \
427 oop target; \
428 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { \
429 target = nullptr; \
430 } else { \
431 target = obj; \
432 } \
433 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, \
434 target, entry, \
435 (jvalue*)STACK_SLOT(-1)), \
436 handle_exception); \
437 } \
438 } \
439 }
440
441 static inline int fast_get_type(TosState tos) {
442 switch (tos) {
443 case ztos:
444 case btos: return Bytecodes::_fast_bgetfield;
445 case ctos: return Bytecodes::_fast_cgetfield;
446 case stos: return Bytecodes::_fast_sgetfield;
447 case itos: return Bytecodes::_fast_igetfield;
448 case ltos: return Bytecodes::_fast_lgetfield;
449 case ftos: return Bytecodes::_fast_fgetfield;
450 case dtos: return Bytecodes::_fast_dgetfield;
451 case atos: return Bytecodes::_fast_agetfield;
452 default:
453 ShouldNotReachHere();
454 return -1;
455 }
456 }
457
458 static inline int fast_put_type(TosState tos) {
459 switch (tos) {
460 case ztos: return Bytecodes::_fast_zputfield;
461 case btos: return Bytecodes::_fast_bputfield;
462 case ctos: return Bytecodes::_fast_cputfield;
463 case stos: return Bytecodes::_fast_sputfield;
464 case itos: return Bytecodes::_fast_iputfield;
465 case ltos: return Bytecodes::_fast_lputfield;
466 case ftos: return Bytecodes::_fast_fputfield;
467 case dtos: return Bytecodes::_fast_dputfield;
468 case atos: return Bytecodes::_fast_aputfield;
469 default:
470 ShouldNotReachHere();
471 return -1;
472 }
473 }
474
475 /*
476 * BytecodeInterpreter::run(interpreterState istate)
477 *
478 * The real deal. This is where byte codes actually get interpreted.
479 * Basically it's a big while loop that iterates until we return from
480 * the method passed in.
481 */
482
483 // Instantiate variants of the method for future linking.
484 template void BytecodeInterpreter::run<false, false>(interpreterState istate);
485 template void BytecodeInterpreter::run<false, true>(interpreterState istate);
486 template void BytecodeInterpreter::run< true, false>(interpreterState istate);
487 template void BytecodeInterpreter::run< true, true>(interpreterState istate);
488
489 template<bool JVMTI_ENABLED, bool REWRITE_BYTECODES>
490 void BytecodeInterpreter::run(interpreterState istate) {
491 intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
492 address pc = istate->bcp();
493 jubyte opcode;
494 intptr_t* locals = istate->locals();
495 ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
496 #ifdef LOTS_OF_REGS
497 JavaThread* THREAD = istate->thread();
498 #else
499 #undef THREAD
500 #define THREAD istate->thread()
501 #endif
502
503 #ifdef ASSERT
504 assert(labs(istate->stack_base() - istate->stack_limit()) == (istate->method()->max_stack() + 1),
505 "Bad stack limit");
506 /* QQQ this should be a stack method so we don't know actual direction */
507 assert(topOfStack >= istate->stack_limit() && topOfStack < istate->stack_base(),
508 "Stack top out of range");
509
510 // Verify linkages.
511 interpreterState l = istate;
512 do {
513 assert(l == l->_self_link, "bad link");
514 l = l->_prev_link;
515 } while (l != nullptr);
516 // Screwups with stack management usually cause us to overwrite istate
517 // save a copy so we can verify it.
518 interpreterState orig = istate;
519 #endif
520
521 #ifdef USELABELS
522 const static void* const opclabels_data[256] = {
523 /* 0x00 */ &&opc_nop, &&opc_aconst_null, &&opc_iconst_m1, &&opc_iconst_0,
524 /* 0x04 */ &&opc_iconst_1, &&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
525 /* 0x08 */ &&opc_iconst_5, &&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
526 /* 0x0C */ &&opc_fconst_1, &&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
527
528 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
529 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
530 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0, &&opc_iload_1,
531 /* 0x1C */ &&opc_iload_2, &&opc_iload_3, &&opc_lload_0, &&opc_lload_1,
532
533 /* 0x20 */ &&opc_lload_2, &&opc_lload_3, &&opc_fload_0, &&opc_fload_1,
534 /* 0x24 */ &&opc_fload_2, &&opc_fload_3, &&opc_dload_0, &&opc_dload_1,
535 /* 0x28 */ &&opc_dload_2, &&opc_dload_3, &&opc_aload_0, &&opc_aload_1,
536 /* 0x2C */ &&opc_aload_2, &&opc_aload_3, &&opc_iaload, &&opc_laload,
537
538 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
539 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
540 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
541 /* 0x3C */ &&opc_istore_1, &&opc_istore_2, &&opc_istore_3, &&opc_lstore_0,
542
543 /* 0x40 */ &&opc_lstore_1, &&opc_lstore_2, &&opc_lstore_3, &&opc_fstore_0,
544 /* 0x44 */ &&opc_fstore_1, &&opc_fstore_2, &&opc_fstore_3, &&opc_dstore_0,
545 /* 0x48 */ &&opc_dstore_1, &&opc_dstore_2, &&opc_dstore_3, &&opc_astore_0,
546 /* 0x4C */ &&opc_astore_1, &&opc_astore_2, &&opc_astore_3, &&opc_iastore,
547
548 /* 0x50 */ &&opc_lastore, &&opc_fastore, &&opc_dastore, &&opc_aastore,
549 /* 0x54 */ &&opc_bastore, &&opc_castore, &&opc_sastore, &&opc_pop,
550 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
551 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1, &&opc_dup2_x2, &&opc_swap,
552
553 /* 0x60 */ &&opc_iadd, &&opc_ladd, &&opc_fadd, &&opc_dadd,
554 /* 0x64 */ &&opc_isub, &&opc_lsub, &&opc_fsub, &&opc_dsub,
555 /* 0x68 */ &&opc_imul, &&opc_lmul, &&opc_fmul, &&opc_dmul,
556 /* 0x6C */ &&opc_idiv, &&opc_ldiv, &&opc_fdiv, &&opc_ddiv,
557
558 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem, &&opc_drem,
559 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg, &&opc_dneg,
560 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr, &&opc_lshr,
561 /* 0x7C */ &&opc_iushr, &&opc_lushr, &&opc_iand, &&opc_land,
562
563 /* 0x80 */ &&opc_ior, &&opc_lor, &&opc_ixor, &&opc_lxor,
564 /* 0x84 */ &&opc_iinc, &&opc_i2l, &&opc_i2f, &&opc_i2d,
565 /* 0x88 */ &&opc_l2i, &&opc_l2f, &&opc_l2d, &&opc_f2i,
566 /* 0x8C */ &&opc_f2l, &&opc_f2d, &&opc_d2i, &&opc_d2l,
567
568 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
569 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl, &&opc_fcmpg, &&opc_dcmpl,
570 /* 0x98 */ &&opc_dcmpg, &&opc_ifeq, &&opc_ifne, &&opc_iflt,
571 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
572
573 /* 0xA0 */ &&opc_if_icmpne, &&opc_if_icmplt, &&opc_if_icmpge, &&opc_if_icmpgt,
574 /* 0xA4 */ &&opc_if_icmple, &&opc_if_acmpeq, &&opc_if_acmpne, &&opc_goto,
575 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch, &&opc_lookupswitch,
576 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
577
578 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
579 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual, &&opc_invokespecial,
580 /* 0xB8 */ &&opc_invokestatic, &&opc_invokeinterface, &&opc_invokedynamic, &&opc_new,
581 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
582
583 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
584 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
585 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_fast_agetfield,
586 /* 0xCC */ &&opc_default, &&opc_fast_bgetfield, &&opc_fast_cgetfield, &&opc_fast_dgetfield,
587
588 /* 0xD0 */ &&opc_fast_fgetfield, &&opc_fast_igetfield, &&opc_fast_lgetfield, &&opc_fast_sgetfield,
589 /* 0xD4 */ &&opc_fast_aputfield, &&opc_default, &&opc_fast_bputfield, &&opc_fast_zputfield,
590 /* 0xD8 */ &&opc_fast_cputfield, &&opc_fast_dputfield, &&opc_fast_fputfield, &&opc_fast_iputfield,
591 /* 0xDC */ &&opc_fast_lputfield, &&opc_fast_sputfield, &&opc_fast_aload_0, &&opc_fast_iaccess_0,
592
593 /* 0xE0 */ &&opc_fast_aaccess_0, &&opc_fast_faccess_0, &&opc_fast_iload, &&opc_fast_iload2,
594 /* 0xE4 */ &&opc_fast_icaload, &&opc_fast_invokevfinal, &&opc_default, &&opc_default,
595 /* 0xE8 */ &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, &&opc_invokehandle,
596 /* 0xEC */ &&opc_nofast_getfield, &&opc_nofast_putfield, &&opc_nofast_aload_0, &&opc_nofast_iload,
597
598 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
599 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
600 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
601 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
602 };
603 uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
604 #endif /* USELABELS */
605
606 switch (istate->msg()) {
607 case initialize: {
608 ShouldNotCallThis();
609 return;
610 }
611 case method_entry: {
612 THREAD->set_do_not_unlock_if_synchronized(true);
613
614 // Lock method if synchronized.
615 if (METHOD->is_synchronized()) {
616 // oop rcvr = locals[0].j.r;
617 oop rcvr;
618 if (METHOD->is_static()) {
619 rcvr = METHOD->constants()->pool_holder()->java_mirror();
620 } else {
621 rcvr = LOCALS_OBJECT(0);
622 VERIFY_OOP(rcvr);
623 }
624
625 // The initial monitor is ours for the taking.
626 BasicObjectLock* mon = &istate->monitor_base()[-1];
627 mon->set_obj(rcvr);
628 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
629 }
630 THREAD->set_do_not_unlock_if_synchronized(false);
631
632 // Notify jvmti.
633 // Whenever JVMTI puts a thread in interp_only_mode, method
634 // entry/exit events are sent for that thread to track stack depth.
635 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
636 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
637 handle_exception);
638 }
639
640 goto run;
641 }
642
643 case popping_frame: {
644 // returned from a java call to pop the frame, restart the call
645 // clear the message so we don't confuse ourselves later
646 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
647 istate->set_msg(no_request);
648 THREAD->clr_pop_frame_in_process();
649 goto run;
650 }
651
652 case method_resume: {
653 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
654 // resume
655 os::breakpoint();
656 }
657 // returned from a java call, continue executing.
658 if (THREAD->has_pending_popframe() && !THREAD->pop_frame_in_process()) {
659 goto handle_Pop_Frame;
660 }
661 if (THREAD->jvmti_thread_state() &&
662 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
663 goto handle_Early_Return;
664 }
665
666 if (THREAD->has_pending_exception()) goto handle_exception;
667 // Update the pc by the saved amount of the invoke bytecode size
668 UPDATE_PC(istate->bcp_advance());
669 goto run;
670 }
671
672 case deopt_resume2: {
673 // Returned from an opcode that will reexecute. Deopt was
674 // a result of a PopFrame request.
675 //
676 goto run;
677 }
678
679 case deopt_resume: {
680 // Returned from an opcode that has completed. The stack has
681 // the result all we need to do is skip across the bytecode
682 // and continue (assuming there is no exception pending)
683 //
684 // compute continuation length
685 //
686 // Note: it is possible to deopt at a return_register_finalizer opcode
687 // because this requires entering the vm to do the registering. While the
688 // opcode is complete we can't advance because there are no more opcodes
      // much like trying to deopt at a poll return. In that case we simply
690 // get out of here
691 //
692 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
693 // this will do the right thing even if an exception is pending.
694 goto handle_return;
695 }
696 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
697 if (THREAD->has_pending_exception()) goto handle_exception;
698 goto run;
699 }
700 case got_monitors: {
701 // continue locking now that we have a monitor to use
702 // we expect to find newly allocated monitor at the "top" of the monitor stack.
703 oop lockee = STACK_OBJECT(-1);
704 VERIFY_OOP(lockee);
705 // derefing's lockee ought to provoke implicit null check
706 // find a free monitor
707 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
708 assert(entry->obj() == nullptr, "Frame manager didn't allocate the monitor");
709 entry->set_obj(lockee);
710 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
711 UPDATE_PC_AND_TOS(1, -1);
712 goto run;
713 }
714 default: {
715 fatal("Unexpected message from frame manager");
716 }
717 }
718
719 run:
720
721 DO_UPDATE_INSTRUCTION_COUNT(*pc)
722 DEBUGGER_SINGLE_STEP_NOTIFY();
723 #ifdef PREFETCH_OPCCODE
724 opcode = *pc; /* prefetch first opcode */
725 #endif
726
727 #ifndef USELABELS
728 while (1)
729 #endif
730 {
731 #ifndef PREFETCH_OPCCODE
732 opcode = *pc;
733 #endif
734 // Seems like this happens twice per opcode. At worst this is only
735 // need at entry to the loop.
736 // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
738 * when returning from transition frames.
739 */
740 opcode_switch:
741 assert(istate == orig, "Corrupted istate");
742 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
743 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
744 assert(topOfStack < istate->stack_base(), "Stack underrun");
745
746 #ifdef USELABELS
747 DISPATCH(opcode);
748 #else
749 switch (opcode)
750 #endif
751 {
752 CASE(_nop):
753 UPDATE_PC_AND_CONTINUE(1);
754
755 /* Push miscellaneous constants onto the stack. */
756
757 CASE(_aconst_null):
758 SET_STACK_OBJECT(nullptr, 0);
759 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
760
761 #undef OPC_CONST_n
762 #define OPC_CONST_n(opcode, const_type, value) \
763 CASE(opcode): \
764 SET_STACK_ ## const_type(value, 0); \
765 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
766
767 OPC_CONST_n(_iconst_m1, INT, -1);
768 OPC_CONST_n(_iconst_0, INT, 0);
769 OPC_CONST_n(_iconst_1, INT, 1);
770 OPC_CONST_n(_iconst_2, INT, 2);
771 OPC_CONST_n(_iconst_3, INT, 3);
772 OPC_CONST_n(_iconst_4, INT, 4);
773 OPC_CONST_n(_iconst_5, INT, 5);
774 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
775 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
776 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
777
778 #undef OPC_CONST2_n
779 #define OPC_CONST2_n(opcname, value, key, kind) \
780 CASE(_##opcname): \
781 { \
782 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
783 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
784 }
785 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
786 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
787 OPC_CONST2_n(lconst_0, Zero, long, LONG);
788 OPC_CONST2_n(lconst_1, One, long, LONG);
789
790 /* Load constant from constant pool: */
791
792 /* Push a 1-byte signed integer value onto the stack. */
793 CASE(_bipush):
794 SET_STACK_INT((jbyte)(pc[1]), 0);
795 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
796
797 /* Push a 2-byte signed integer constant onto the stack. */
798 CASE(_sipush):
799 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
800 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
801
802 /* load from local variable */
803
804 CASE(_aload):
805 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
806 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
807 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
808
809 CASE(_iload):
810 {
811 if (REWRITE_BYTECODES) {
812 // Attempt to rewrite iload, iload -> fast_iload2
813 // iload, caload -> fast_icaload
814 // Normal iloads will be rewritten to fast_iload to avoid checking again.
815 switch (*(pc + 2)) {
816 case Bytecodes::_fast_iload:
817 REWRITE_AT_PC(Bytecodes::_fast_iload2);
818 break;
819 case Bytecodes::_caload:
820 REWRITE_AT_PC(Bytecodes::_fast_icaload);
821 break;
822 case Bytecodes::_iload:
823 // Wait until rewritten to _fast_iload.
824 break;
825 default:
826 // Last iload in a (potential) series, don't check again.
827 REWRITE_AT_PC(Bytecodes::_fast_iload);
828 }
829 }
830 // Normal iload handling.
831 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
832 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
833 }
834
835 CASE(_nofast_iload):
836 {
837 // Normal, non-rewritable iload handling.
838 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
839 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
840 }
841
842 CASE(_fast_iload):
843 CASE(_fload):
844 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
845 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
846
847 CASE(_fast_iload2):
848 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
849 SET_STACK_SLOT(LOCALS_SLOT(pc[3]), 1);
850 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
851
852 CASE(_lload):
853 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
854 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
855
856 CASE(_dload):
857 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
858 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
859
860 #undef OPC_LOAD_n
861 #define OPC_LOAD_n(num) \
862 CASE(_iload_##num): \
863 CASE(_fload_##num): \
864 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
865 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
866 \
867 CASE(_lload_##num): \
868 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
869 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
870 CASE(_dload_##num): \
871 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
872 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
873
874 OPC_LOAD_n(0);
875 OPC_LOAD_n(1);
876 OPC_LOAD_n(2);
877 OPC_LOAD_n(3);
878
879 #undef OPC_ALOAD_n
880 #define OPC_ALOAD_n(num) \
881 CASE(_aload_##num): { \
882 oop obj = LOCALS_OBJECT(num); \
883 VERIFY_OOP(obj); \
884 SET_STACK_OBJECT(obj, 0); \
885 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
886 }
887
      // aload_0 gets a dedicated case because it participates in bytecode
      // quickening: an aload_0 followed by a fast getfield can be fused into
      // one of the _fast_Xaccess_0 superinstructions.
      CASE(_aload_0):
      {
        /* Maybe rewrite if following bytecode is one of the supported _fast_Xgetfield bytecodes. */
        if (REWRITE_BYTECODES) {
          // Peek at the next opcode to pick a fused form if available.
          switch (*(pc + 1)) {
            case Bytecodes::_fast_agetfield:
              REWRITE_AT_PC(Bytecodes::_fast_aaccess_0);
              break;
            case Bytecodes::_fast_fgetfield:
              REWRITE_AT_PC(Bytecodes::_fast_faccess_0);
              break;
            case Bytecodes::_fast_igetfield:
              REWRITE_AT_PC(Bytecodes::_fast_iaccess_0);
              break;
            case Bytecodes::_getfield:
            case Bytecodes::_nofast_getfield: {
              /* Otherwise, do nothing here, wait until/if it gets rewritten to _fast_Xgetfield.
               * Unfortunately, this punishes volatile field access, because it never gets
               * rewritten. */
              break;
            }
            default:
              // No fusable successor: still rewrite to _fast_aload_0 so we
              // skip this dispatch next time through.
              REWRITE_AT_PC(Bytecodes::_fast_aload_0);
              break;
          }
        }
        // Normal aload_0 handling.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      // Non-rewritable variant: identical stack effect, never quickened.
      CASE(_nofast_aload_0):
      {
        // Normal, non-rewritable aload_0 handling.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      OPC_ALOAD_n(1);
      OPC_ALOAD_n(2);
      OPC_ALOAD_n(3);
931
      /* store to a local variable */

      // Pop one reference and store into local pc[1]. astore() also handles
      // the returnAddress case (jsr/ret) — see its definition earlier in file.
      CASE(_astore):
        astore(topOfStack, -1, locals, pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      // One-slot stores: pop top slot into local pc[1].
      CASE(_istore):
      CASE(_fstore):
        SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      // Two-slot stores: pop the long/double pair into locals pc[1]/pc[1]+1.
      CASE(_lstore):
        SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
        SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
950
951 CASE(_wide): {
952 uint16_t reg = Bytes::get_Java_u2(pc + 2);
953
954 opcode = pc[1];
955
956 // Wide and it's sub-bytecode are counted as separate instructions. If we
957 // don't account for this here, the bytecode trace skips the next bytecode.
958 DO_UPDATE_INSTRUCTION_COUNT(opcode);
959
960 switch(opcode) {
961 case Bytecodes::_aload:
962 VERIFY_OOP(LOCALS_OBJECT(reg));
963 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
964 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
965
966 case Bytecodes::_iload:
967 case Bytecodes::_fload:
968 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
969 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
970
971 case Bytecodes::_lload:
972 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
973 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
974
975 case Bytecodes::_dload:
976 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
977 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
978
979 case Bytecodes::_astore:
980 astore(topOfStack, -1, locals, reg);
981 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
982
983 case Bytecodes::_istore:
984 case Bytecodes::_fstore:
985 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
986 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
987
988 case Bytecodes::_lstore:
989 SET_LOCALS_LONG(STACK_LONG(-1), reg);
990 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
991
992 case Bytecodes::_dstore:
993 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
994 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
995
996 case Bytecodes::_iinc: {
997 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
998 // Be nice to see what this generates.... QQQ
999 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
1000 UPDATE_PC_AND_CONTINUE(6);
1001 }
1002 case Bytecodes::_ret:
1003 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1004 UPDATE_PC_AND_CONTINUE(0);
1005 default:
1006 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
1007 }
1008 }
1009
1010
      // Indexed one-slot stores (astore_n/istore_n/fstore_n) for fixed local
      // index 'num'. One-byte opcodes: pc += 1, tos -= 1.
#undef OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

      OPC_STORE_n(0);
      OPC_STORE_n(1);
      OPC_STORE_n(2);
      OPC_STORE_n(3);

      // Indexed two-slot stores (dstore_n/lstore_n): pop a slot pair into
      // locals num/num+1; pc += 1, tos -= 2.
#undef OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

      OPC_DSTORE_n(0);
      OPC_DSTORE_n(1);
      OPC_DSTORE_n(2);
      OPC_DSTORE_n(3);
1039
      /* stack pop, dup, and insert opcodes */

      // All of these only shuffle untyped stack slots; the dup/swap helpers
      // (defined earlier in this file) do the slot copies, and the tos delta
      // here accounts for any net growth.

      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):               /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):    /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):    /* insert top word three down  */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):   /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):   /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {        /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }
1079
1080 /* Perform various binary integer operations */
1081
      // Integer binary ops for both int and long. 'test' is 1 for div/rem,
      // enabling the divide-by-zero check that raises ArithmeticException.
      // Int operands sit at slots -2,-1; long operands at (-4,-3),(-2,-1).
#undef OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero");                               \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero");                          \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

      // No explicit zero check here: float/double division by zero produces
      // Inf/NaN per IEEE-754 rather than an exception.
#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                                  \
      CASE(_d##opcname): {                                                 \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
                                            STACK_DOUBLE(-1)),             \
                                            -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
      }                                                                    \
      CASE(_f##opcname):                                                   \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
                                          STACK_FLOAT(-1)),                \
                                          -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

      // The shift count is always an int (top slot), even for long shifts,
      // hence STACK_INT(-1) in both variants.
#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);
1167
      /* Increment local variable by constant */
      // iinc layout: opcode, u1 local index, s1 signed constant.
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

      /* negate the value on the top of the stack */

      // All four negations rewrite the top value in place: no tos change.
      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }
1197
      /* Conversion operations */

      // Widening to a two-slot type follows the pattern: read the source,
      // pop it, then SET_*(r, 1)/(r, 2) places the wide result and the tos
      // delta in UPDATE_PC_AND_TOS_AND_CONTINUE accounts for the net growth.

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):   /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          // SharedRuntime::f2i implements the JVMS saturating NaN/overflow
          // semantics (NaN -> 0, out-of-range -> MIN/MAX).
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      // Narrowing int conversions rewrite the top slot in place.
      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
1306
      /* comparison operators */

      // Branch skeleton shared by all conditional branches: 'skip' is either
      // the signed 16-bit branch offset (taken) or 3, the instruction length
      // (not taken). DO_BACKEDGE_CHECKS handles safepoints/OSR on backward
      // branches, so it must see the pre-branch pc.

#define COMPARISON_OP(name, comparison)                                      \
      CASE(_if_icmp##name): {                                                \
          int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }                                                                      \
      CASE(_if##name): {                                                     \
          int skip = (STACK_INT(-1) comparison 0)                            \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

      // eq/ne additionally get a reference-comparison form (if_acmpXX).
#define COMPARISON_OP2(name, comparison)                                     \
      COMPARISON_OP(name, comparison)                                        \
      CASE(_if_acmp##name): {                                                \
          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

      // ifnonnull: branch when the reference is NOT null.
#define NULL_COMPARISON_NOT_OP(name)                                         \
      CASE(_if##name): {                                                     \
          int skip = (!(STACK_OBJECT(-1) == nullptr))                        \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

      // ifnull: branch when the reference IS null.
#define NULL_COMPARISON_OP(name)                                             \
      CASE(_if##name): {                                                     \
          int skip = ((STACK_OBJECT(-1) == nullptr))                         \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);
1366
      /* Goto pc at specified offset in switch table. */

      // tableswitch layout (after word-alignment): default offset, low, high,
      // then (high-low+1) jump offsets. Operands are big-endian in the class
      // file, hence the get_Java_u4 reads.
      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          // Single unsigned compare covers both key < low and key > high.
          if (((uint32_t) key > (uint32_t)(high - low))) {
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key. */

      // lookupswitch layout (after alignment): default offset, npairs, then
      // npairs of (match, offset) pairs scanned linearly.
      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              break;
            }
          }
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
1407
      // fcmpl/fcmpg differ only in NaN handling: the third argument is the
      // result produced when either operand is NaN (-1 for *l, +1 for *g).
      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                        STACK_FLOAT(-1),
                                        (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      // Doubles occupy two slots each: operands at (-4,-3) and (-2,-1);
      // pop all four slots and push the int result.
      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      // lcmp: same slot layout as dcmp; result is -1/0/1.
      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
1436
1437
      /* Return from a method */

      // All value-returning forms share one path: the return value is left on
      // the expression stack and picked up by the frame manager after
      // handle_return.
      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      CASE(_lreturn):
      CASE(_dreturn):
      CASE(_return): {
          // Allow a safepoint before returning to frame manager.
          RETURN_SAFEPOINT;
          goto handle_return;
      }

      // Variant used for Object.<init> of classes with finalizers: register
      // the receiver with the finalizer machinery before returning.
      CASE(_return_register_finalizer): {
          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
1459
      /* Array access byte-codes */

      // Null-check the array ref and bounds-check the index; raises NPE or
      // ArrayIndexOutOfBoundsException with a formatted message on failure.
#define ARRAY_INDEX_CHECK(arrObj, index)                                       \
      /* Two integers, the additional message, and the null-terminator */      \
      char message[2 * jintAsStringSize + 33];                                 \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          jio_snprintf(message, sizeof(message),                               \
                  "Index %d out of bounds for length %d",                      \
                  index, arrObj->length());                                    \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message);                                              \
      }

      /* Every array access byte-code starts out like this */
      // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
      // Binds 'arrObj' and 'index' from the stack (array ref at arrayOff,
      // index one slot above it), then checks both.
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint     index  = STACK_INT(arrayOff + 1);                               \
      ARRAY_INDEX_CHECK(arrObj, index)

      /* 32-bit loads. These handle conversion from < 32-bit types */
      // Pops arrayref+index (2 slots) and pushes one slot: net tos -1.
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
      {                                                                               \
          ARRAY_INTRO(-2);                                                            \
          (void)extra;                                                                \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2);                                                       \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
      }

      /* 64-bit loads */
      // Pops 2 slots, pushes 2 slots: tos unchanged, hence no TOS delta.
#define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
      {                                                                                    \
          ARRAY_INTRO(-2);                                                                 \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          (void)extra;                                                                     \
          UPDATE_PC_AND_CONTINUE(1);                                                       \
      }

      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
      // aaload needs its own body: flat (inline-type) arrays must go through
      // the runtime to materialize the element as a heap object.
      CASE(_aaload): {
          ARRAY_INTRO(-2);
          if (arrObj->is_flatArray()) {
            CALL_VM(InterpreterRuntime::flat_array_load(THREAD, (objArrayOop) arrObj, index), handle_exception);
            SET_STACK_OBJECT(THREAD->vm_result_oop(), -2);
            THREAD->set_vm_result_oop(nullptr);
          } else {
            SET_STACK_OBJECT(((refArrayOop) arrObj)->obj_at(index), -2);
          }
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR,  jchar, "%d",   STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d",   STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      // Quickened iload+caload pair: the local index is in pc[1], the array
      // ref is already on the stack; result replaces the array ref slot.
      CASE(_fast_icaload): {
          // Custom fast access for iload,caload pair.
          arrayOop arrObj = (arrayOop) STACK_OBJECT(-1);
          jint index = LOCALS_INT(pc[1]);
          ARRAY_INDEX_CHECK(arrObj, index);
          SET_STACK_INT(*(jchar *)(((address) arrObj->base(T_CHAR)) + index * sizeof(jchar)), -1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 0);
      }
1534
      /* 32-bit stores. These handle conversion to < 32-bit types */
      // Stack: arrayref(-3), index(-2), value(-1); pops all three.
#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
      {                                                                              \
          ARRAY_INTRO(-3);                                                           \
          (void)extra;                                                               \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
      }

      /* 64-bit stores */
      // Stack: arrayref(-4), index(-3), value(-2,-1); pops all four slots.
#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
      {                                                                              \
          ARRAY_INTRO(-4);                                                           \
          (void)extra;                                                               \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
      }

      CASE(_iastore):
          ARRAY_STOREFROM32(T_INT, jint,   "%d",   STACK_INT, 0);
      CASE(_fastore):
          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
      /*
       * This one looks different because of the assignability check
       */
      CASE(_aastore): {
          oop rhsObject = STACK_OBJECT(-1);
          VERIFY_OOP(rhsObject);
          ARRAY_INTRO( -3);
          // arrObj, index are set
          if (rhsObject != nullptr) {
            /* Check assignability of rhsObject into arrObj */
            Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
            Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch
            //
            // Fast path: exact klass match avoids the subtype walk.
            if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
            }
          } else if (arrObj->is_null_free_array()) {
            // Null-restricted (flat/null-free) arrays reject null stores.
            VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "Cannot store null in a null-restricted array");
          }
          // obj_at_put applies the GC store barrier.
          ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore): {
          ARRAY_INTRO(-3);
          int item = STACK_INT(-1);
          // if it is a T_BOOLEAN array, mask the stored value to 0/1
          if (arrObj->klass() == Universe::boolArrayKlass()) {
            item &= 1;
          } else {
            assert(arrObj->klass() == Universe::byteArrayKlass(),
                   "should be byte array otherwise");
          }
          ((typeArrayOop)arrObj)->byte_at_put(index, item);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_castore):
          ARRAY_STOREFROM32(T_CHAR, jchar,  "%d",   STACK_INT, 0);
      CASE(_sastore):
          ARRAY_STOREFROM32(T_SHORT, jshort, "%d",   STACK_INT, 0);
      CASE(_lastore):
          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_dastore):
          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      // Replace the array ref with its length; null ref raises NPE.
      CASE(_arraylength):
      {
          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
          CHECK_NULL(ary);
          SET_STACK_INT(ary->length(), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }
1611
      /* monitorenter and monitorexit for locking/unlocking an object */

      CASE(_monitorenter): {
        oop lockee = STACK_OBJECT(-1);
        // derefing's lockee ought to provoke implicit null check
        CHECK_NULL(lockee);
        // find a free monitor or one already allocated for this object
        // if we find a matching object then we need a new monitor
        // since this is recursive enter
        // The monitor block lives between stack_base and monitor_base; scan
        // it for a free slot, remembering the last free one seen.
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        BasicObjectLock* entry = nullptr;
        while (most_recent != limit ) {
          if (most_recent->obj() == nullptr) entry = most_recent;
          else if (most_recent->obj() == lockee) break;
          most_recent++;
        }
        if (entry != nullptr) {
          // Claim the slot, then let the runtime do the actual locking.
          entry->set_obj(lockee);
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
        } else {
          // No free slot: ask the frame manager to grow the monitor area and
          // re-execute this monitorenter (pc is NOT advanced).
          istate->set_msg(more_monitors);
          UPDATE_PC_AND_RETURN(0); // Re-execute
        }
      }
1638
1639 CASE(_monitorexit): {
1640 oop lockee = STACK_OBJECT(-1);
1641 CHECK_NULL(lockee);
1642 // derefing's lockee ought to provoke implicit null check
1643 // find our monitor slot
1644 BasicObjectLock* limit = istate->monitor_base();
1645 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1646 while (most_recent != limit ) {
1647 if ((most_recent)->obj() == lockee) {
1648 BasicLock* lock = most_recent->lock();
1649 InterpreterRuntime::monitorexit(most_recent);
1650 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1651 }
1652 most_recent++;
1653 }
1654 // Need to throw illegal monitor state exception
1655 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1656 ShouldNotReachHere();
1657 }
1658
      /* All of the non-quick opcodes. */

      /* -Set clobbersCpIndex true if the quickened opcode clobbers the
       *  constant pool index in the instruction.
       */
      // Shared handler for getfield/getstatic/nofast_getfield: resolve the
      // field entry (calling into the runtime on first touch), then load the
      // value with the access ordering the field's volatility requires.
      CASE(_getfield):
      CASE(_nofast_getfield):
      CASE(_getstatic):
        {
          u2 index;
          // Index is native-endian: it was rewritten into the bytecode stream.
          index = Bytes::get_native_u2(pc+1);
          ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

          // QQQ Need to make this as inlined as possible. Probably need to
          // split all the bytecode cases out so c++ compiler has a chance
          // for constant prop to fold everything possible away.

          // Interpreter runtime does not expect "nofast" opcodes,
          // prepare the vanilla opcode for it.
          Bytecodes::Code code = (Bytecodes::Code)opcode;
          if (code == Bytecodes::_nofast_getfield) {
            code = Bytecodes::_getfield;
          }

          if (!entry->is_resolved(code)) {
            CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
                    handle_exception);
            // Re-fetch: resolution may have populated the entry.
            entry = cp->resolved_field_entry_at(index);
          }

          oop obj;
          if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
            // Statics live in the holder's java mirror.
            Klass* k = entry->field_holder();
            obj = k->java_mirror();
            MORE_STACK(1);  // Assume single slot push
          } else {
            obj = STACK_OBJECT(-1);
            CHECK_NULL(obj);
            // Check if we can rewrite non-volatile _getfield to one of the _fast_Xgetfield.
            if (REWRITE_BYTECODES && !entry->is_volatile() &&
                  ((Bytecodes::Code)opcode != Bytecodes::_nofast_getfield)) {
              // Rewrite current BC to _fast_Xgetfield.
              REWRITE_AT_PC(fast_get_type((TosState)(entry->tos_state())));
            }
          }

          MAYBE_POST_FIELD_ACCESS(obj);

          //
          // Now store the result on the stack
          //
          TosState tos_type = (TosState)(entry->tos_state());
          int field_offset = entry->field_offset();
          if (entry->is_volatile()) {
            // IRIW: on non-multi-copy-atomic CPUs a leading fence is needed
            // for volatile reads to preserve sequential consistency.
            if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
              OrderAccess::fence();
            }
            // Volatile path: acquire-ordered loads.
            switch (tos_type) {
              case btos:
              case ztos:
                SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
                break;
              case ctos:
                SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
                break;
              case stos:
                SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
                break;
              case itos:
                SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
                break;
              case ftos:
                SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
                break;
              case ltos:
                SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
                MORE_STACK(1);
                break;
              case dtos:
                SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
                MORE_STACK(1);
                break;
              case atos: {
                assert(!entry->is_flat(), "Flat volatile field not supported");
                oop val = obj->obj_field_acquire(field_offset);
                VERIFY_OOP(val);
                SET_STACK_OBJECT(val, -1);
                break;
              }
              default:
                ShouldNotReachHere();
            }
          } else {
            // Non-volatile path: plain loads.
            switch (tos_type) {
              case btos:
              case ztos:
                SET_STACK_INT(obj->byte_field(field_offset), -1);
                break;
              case ctos:
                SET_STACK_INT(obj->char_field(field_offset), -1);
                break;
              case stos:
                SET_STACK_INT(obj->short_field(field_offset), -1);
                break;
              case itos:
                SET_STACK_INT(obj->int_field(field_offset), -1);
                break;
              case ftos:
                SET_STACK_FLOAT(obj->float_field(field_offset), -1);
                break;
              case ltos:
                SET_STACK_LONG(obj->long_field(field_offset), 0);
                MORE_STACK(1);
                break;
              case dtos:
                SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
                MORE_STACK(1);
                break;
              case atos: {
                oop val;
                if (entry->is_flat()) {
                  // Flat (inline-type) field: runtime materializes the value
                  // as a heap object, returned via vm_result_oop.
                  CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
                  val = THREAD->vm_result_oop();
                  THREAD->set_vm_result_oop(nullptr);
                } else {
                  val = obj->obj_field(field_offset);
                }
                VERIFY_OOP(val);
                SET_STACK_OBJECT(val, -1);
                break;
              }
              default:
                ShouldNotReachHere();
            }
          }

          UPDATE_PC_AND_CONTINUE(3);
        }
1797
      // Shared handler for putfield/putstatic/nofast_putfield: resolve the
      // field entry, compute how many stack slots to pop (value, plus the
      // receiver for instance fields), and store with the ordering the
      // field's volatility requires.
      CASE(_putfield):
      CASE(_nofast_putfield):
      CASE(_putstatic):
        {
          u2 index = Bytes::get_native_u2(pc+1);
          ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

          // Interpreter runtime does not expect "nofast" opcodes,
          // prepare the vanilla opcode for it.
          Bytecodes::Code code = (Bytecodes::Code)opcode;
          if (code == Bytecodes::_nofast_putfield) {
            code = Bytecodes::_putfield;
          }

          if (!entry->is_resolved(code)) {
            CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
                    handle_exception);
            entry = cp->resolved_field_entry_at(index);
          }

          // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
          // out so c++ compiler has a chance for constant prop to fold everything possible away.

          oop obj;
          int count;
          TosState tos_type = (TosState)(entry->tos_state());

          // count = net tos change: -1 for a one-slot value, -2 for
          // long/double, and one more for the receiver (instance fields).
          count = -1;
          if (tos_type == ltos || tos_type == dtos) {
            --count;
          }
          if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
            Klass* k = entry->field_holder();
            obj = k->java_mirror();
          } else {
            --count;
            obj = STACK_OBJECT(count);
            CHECK_NULL(obj);

            // Check if we can rewrite non-volatile _putfield to one of the _fast_Xputfield.
            if (REWRITE_BYTECODES && !entry->is_volatile() &&
                  ((Bytecodes::Code)opcode != Bytecodes::_nofast_putfield)) {
              // Rewrite current BC to _fast_Xputfield.
              REWRITE_AT_PC(fast_put_type((TosState)(entry->tos_state())));
            }
          }

          MAYBE_POST_FIELD_MODIFICATION(obj);

          //
          // Now store the result
          //
          int field_offset = entry->field_offset();
          if (entry->is_volatile()) {
            // Volatile path: release-ordered stores, trailing storeload fence.
            switch (tos_type) {
              case ztos:
                obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
                break;
              case btos:
                obj->release_byte_field_put(field_offset, STACK_INT(-1));
                break;
              case ctos:
                obj->release_char_field_put(field_offset, STACK_INT(-1));
                break;
              case stos:
                obj->release_short_field_put(field_offset, STACK_INT(-1));
                break;
              case itos:
                obj->release_int_field_put(field_offset, STACK_INT(-1));
                break;
              case ftos:
                obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
                break;
              case ltos:
                obj->release_long_field_put(field_offset, STACK_LONG(-1));
                break;
              case dtos:
                obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
                break;
              case atos: {
                assert(!entry->is_flat(), "Flat volatile field not supported");
                oop val = STACK_OBJECT(-1);
                VERIFY_OOP(val);
                obj->release_obj_field_put(field_offset, val);
                break;
              }
              default:
                ShouldNotReachHere();
            }
            OrderAccess::storeload();
          } else {
            // Non-volatile path: plain stores.
            switch (tos_type) {
              case ztos:
                obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
                break;
              case btos:
                obj->byte_field_put(field_offset, STACK_INT(-1));
                break;
              case ctos:
                obj->char_field_put(field_offset, STACK_INT(-1));
                break;
              case stos:
                obj->short_field_put(field_offset, STACK_INT(-1));
                break;
              case itos:
                obj->int_field_put(field_offset, STACK_INT(-1));
                break;
              case ftos:
                obj->float_field_put(field_offset, STACK_FLOAT(-1));
                break;
              case ltos:
                obj->long_field_put(field_offset, STACK_LONG(-1));
                break;
              case dtos:
                obj->double_field_put(field_offset, STACK_DOUBLE(-1));
                break;
              case atos: {
                oop val = STACK_OBJECT(-1);
                VERIFY_OOP(val);
                if (entry->is_flat()) {
                  // Flat (inline-type) field: delegate to the runtime.
                  CALL_VM(InterpreterRuntime::write_flat_field(THREAD, obj, val, entry), handle_exception);
                } else {
                  obj->obj_field_put(field_offset, val);
                }
                break;
              }
              default:
                ShouldNotReachHere();
            }
          }

          UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
        }
1931
      // _new: fast-path TLAB allocation when the klass is already resolved,
      // initialized, and finalizer-free; otherwise fall back to the runtime.
      CASE(_new): {
        u2 index = Bytes::get_Java_u2(pc+1);

        // Attempt TLAB allocation first.
        //
        // To do this, we need to make sure:
        //   - klass is initialized
        //   - klass can be fastpath allocated (e.g. does not have finalizer)
        //   - TLAB accepts the allocation
        ConstantPool* constants = istate->method()->constants();
        if (UseTLAB && !constants->tag_at(index).is_unresolved_klass()) {
          Klass* entry = constants->resolved_klass_at(index);
          InstanceKlass* ik = InstanceKlass::cast(entry);
          if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
            size_t obj_size = ik->size_helper();
            HeapWord* result = THREAD->tlab().allocate(obj_size);
            if (result != nullptr) {
              // Initialize object field block.
              if (!ZeroTLAB) {
                // The TLAB was not pre-zeroed, we need to clear the memory here.
                size_t hdr_size = oopDesc::header_size();
                Copy::fill_to_words(result + hdr_size, obj_size - hdr_size, 0);
              }

              // Initialize header, mirrors MemAllocator.
              if (UseCompactObjectHeaders) {
                oopDesc::release_set_mark(result, ik->prototype_header());
              } else {
                oopDesc::set_mark(result, markWord::prototype());
                if (oopDesc::has_klass_gap()) {
                  oopDesc::set_klass_gap(result, 0);
                }
                oopDesc::release_set_klass(result, ik);
              }
              oop obj = cast_to_oop(result);

              // Must prevent reordering of stores for object initialization
              // with stores that publish the new object.
              OrderAccess::storestore();
              SET_STACK_OBJECT(obj, 0);
              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
            }
          }
        }
        // Slow case allocation
        CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
      }
      CASE(_anewarray): {
        u2 index = Bytes::get_Java_u2(pc+1);
        jint size = STACK_INT(-1);
        // The runtime resolves the element klass and allocates the array;
        // the new array comes back in the thread-local vm_result slot.
        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Overwrite the size operand in place with the array ref (net TOS change: 0).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_CONTINUE(3);
      }
      CASE(_multianewarray): {
        jint dims = *(pc+3);
        // Size of the outermost dimension; unused here — the runtime reads
        // all dimension sizes directly from dimarray below.
        jint size = STACK_INT(-1);
        // stack grows down, dimensions are up!
        jint *dimarray =
                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
                                      Interpreter::stackElementWords-1];
        //adjust pointer to start of stack element
        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Pop the 'dims' size operands and push the array: net TOS change -(dims-1).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -dims);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
      }
      CASE(_checkcast):
          // checkcast on null always succeeds and leaves null on the stack.
          if (STACK_OBJECT(-1) != nullptr) {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
            Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
              // Incompatible: build a descriptive message and raise CCE.
              ResourceMark rm(THREAD);
              char* message = SharedRuntime::generate_class_cast_message(
                objKlass, klassOf);
              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
            }
          }
          // The operand stays on the stack; checkcast only advances the pc.
          UPDATE_PC_AND_CONTINUE(3);

      CASE(_instanceof):
          // instanceof on null is defined to be 0 (false).
          if (STACK_OBJECT(-1) == nullptr) {
            SET_STACK_INT(0, -1);
          } else {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
            Klass* objKlass = STACK_OBJECT(-1)->klass();
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            // Replace the object reference with the boolean result in place.
            if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
              SET_STACK_INT(1, -1);
            } else {
              SET_STACK_INT(0, -1);
            }
          }
          UPDATE_PC_AND_CONTINUE(3);
2063
      CASE(_ldc_w):
      CASE(_ldc):
      {
        // Shared handler: ldc takes a 1-byte index (pc advance 2),
        // ldc_w a 2-byte index (pc advance 3). Both push one stack slot.
        u2 index;
        bool wide = false;
        int incr = 2; // frequent case
        if (opcode == Bytecodes::_ldc) {
          index = pc[1];
        } else {
          index = Bytes::get_Java_u2(pc+1);
          incr = 3;
          wide = true;
        }

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {
        case JVM_CONSTANT_Integer:
          SET_STACK_INT(constants->int_at(index), 0);
          break;

        case JVM_CONSTANT_Float:
          SET_STACK_FLOAT(constants->float_at(index), 0);
          break;

        case JVM_CONSTANT_String:
          {
            // String constants are interned lazily; a null resolved
            // reference means we must call into the VM to resolve.
            oop result = constants->resolved_reference_at(index);
            if (result == nullptr) {
              CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
              SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
              THREAD->set_vm_result_oop(nullptr);
            } else {
              VERIFY_OOP(result);
              SET_STACK_OBJECT(result, 0);
            }
            break;
          }

        case JVM_CONSTANT_Class:
          // Push the java.lang.Class mirror of the already-resolved klass.
          VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
          SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
          break;

        case JVM_CONSTANT_UnresolvedClass:
        case JVM_CONSTANT_UnresolvedClassInError:
          CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
          SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
          THREAD->set_vm_result_oop(nullptr);
          break;

        case JVM_CONSTANT_Dynamic:
        case JVM_CONSTANT_DynamicInError:
          {
            CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
            oop result = THREAD->vm_result_oop();
            VERIFY_OOP(result);

            // A condy of primitive type is returned boxed; unbox it onto
            // the expression stack. Only single-slot types are legal for
            // ldc/ldc_w (two-slot types go through ldc2_w).
            jvalue value;
            BasicType type = java_lang_boxing_object::get_value(result, &value);
            switch (type) {
            case T_FLOAT:   SET_STACK_FLOAT(value.f, 0); break;
            case T_INT:     SET_STACK_INT(value.i, 0); break;
            case T_SHORT:   SET_STACK_INT(value.s, 0); break;
            case T_BYTE:    SET_STACK_INT(value.b, 0); break;
            case T_CHAR:    SET_STACK_INT(value.c, 0); break;
            case T_BOOLEAN: SET_STACK_INT(value.z, 0); break;
            default:  ShouldNotReachHere();
            }

            break;
          }

        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
2140
      CASE(_ldc2_w):
      {
        // ldc2_w pushes a two-slot constant (long/double); pc advance 3.
        u2 index = Bytes::get_Java_u2(pc+1);

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {

        case JVM_CONSTANT_Long:
           SET_STACK_LONG(constants->long_at(index), 1);
          break;

        case JVM_CONSTANT_Double:
           SET_STACK_DOUBLE(constants->double_at(index), 1);
          break;

        case JVM_CONSTANT_Dynamic:
        case JVM_CONSTANT_DynamicInError:
          {
            CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
            oop result = THREAD->vm_result_oop();
            VERIFY_OOP(result);

            // Two-slot condy values come back boxed; unbox onto the stack.
            jvalue value;
            BasicType type = java_lang_boxing_object::get_value(result, &value);
            switch (type) {
            case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break;
            case T_LONG:   SET_STACK_LONG(value.j, 1); break;
            default:  ShouldNotReachHere();
            }

            break;
          }

        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
      }
2178
      CASE(_fast_aldc_w):
      CASE(_fast_aldc): {
        // Rewritten (quickened) forms of ldc/ldc_w for reference constants.
        // Note: the index is native-endian here (it was rewritten), unlike
        // the Java-endian index of the un-quickened bytecodes.
        u2 index;
        int incr;
        if (opcode == Bytecodes::_fast_aldc) {
          index = pc[1];
          incr = 2;
        } else {
          index = Bytes::get_native_u2(pc+1);
          incr = 3;
        }

        // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
        // This kind of CP cache entry does not need to match the flags byte, because
        // there is a 1-1 relation between bytecode type and CP entry type.
        ConstantPool* constants = METHOD->constants();
        oop result = constants->resolved_reference_at(index);
        if (result == nullptr) {
          CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
                  handle_exception);
          result = THREAD->vm_result_oop();
        }
        // A resolved null is stored as the sentinel so it is distinguishable
        // from "not yet resolved"; map it back to a real null here.
        if (result == Universe::the_null_sentinel())
          result = nullptr;

        VERIFY_OOP(result);
        SET_STACK_OBJECT(result, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
2208
      CASE(_invokedynamic): {
        // invokedynamic carries a native-endian u4 index into the resolved
        // indy entries; the bytecode is 5 bytes long (opcode + 4-byte index).
        u4 index = Bytes::get_native_u4(pc+1);
        ResolvedIndyEntry* indy_info = cp->resolved_indy_entry_at(index);
        if (!indy_info->is_resolved()) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          indy_info = cp->resolved_indy_entry_at(index); // get resolved entry
        }
        Method* method = indy_info->method();
        if (VerifyOops) method->verify();

        // Push the appendix argument (MethodType/CallSite dependent) as a
        // trailing parameter, if the resolved call site has one.
        if (indy_info->has_appendix()) {
          constantPoolHandle cp(THREAD, METHOD->constants());
          SET_STACK_OBJECT(cp->resolved_reference_from_indy(index), 0);
          MORE_STACK(1);
        }

        // Hand the call off to the frame manager; it will re-enter the
        // interpreter (or compiled code) for the callee.
        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(5);

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }

      CASE(_invokehandle): {

        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);

        if (! entry->is_resolved((Bytecodes::Code) opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          // Re-fetch: resolution may have updated the entry.
          entry = cp->resolved_method_entry_at(index);
        }

        Method* method = entry->method();
        if (VerifyOops) method->verify();

        // As with invokedynamic: push the appendix as an extra argument.
        if (entry->has_appendix()) {
          constantPoolHandle cp(THREAD, METHOD->constants());
          SET_STACK_OBJECT(cp->cache()->appendix_if_resolved(entry), 0);
          MORE_STACK(1);
        }

        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(3);

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
2261
      CASE(_invokeinterface): {
        u2 index = Bytes::get_native_u2(pc+1);

        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
        if (!entry->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
        }

        istate->set_msg(call_method);

        // Special case of invokeinterface called for virtual method of
        // java.lang.Object. See cpCache.cpp for details.
        Method* callee = nullptr;
        if (entry->is_forced_virtual()) {
          // Null check the receiver before any dispatch decision.
          CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
          if (entry->is_vfinal()) {
            callee = entry->method();
          } else {
            // Get receiver.
            int parms = entry->number_of_parameters();
            // Same comments as invokevirtual apply here.
            oop rcvr = STACK_OBJECT(-parms);
            VERIFY_OOP(rcvr);
            Klass* rcvrKlass = rcvr->klass();
            // Virtual dispatch through the receiver's vtable.
            callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
          }
        } else if (entry->is_vfinal()) {
          // private interface method invocations
          //
          // Ensure receiver class actually implements
          // the resolved interface class. The link resolver
          // does this, but only for the first time this
          // interface is being called.
          int parms = entry->number_of_parameters();
          oop rcvr = STACK_OBJECT(-parms);
          CHECK_NULL(rcvr);
          Klass* recv_klass = rcvr->klass();
          Klass* resolved_klass = entry->interface_klass();
          if (!recv_klass->is_subtype_of(resolved_klass)) {
            ResourceMark rm(THREAD);
            char buf[200];
            jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
              recv_klass->external_name(),
              resolved_klass->external_name());
            VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
          }
          callee = entry->method();
        }
        if (callee != nullptr) {
          // One of the fast paths above found the callee: dispatch now.
          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
          if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
          istate->set_bcp_advance(5);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }

        // this could definitely be cleaned up QQQ
        // General case: itable dispatch.
        Method *interface_method = entry->method();
        InstanceKlass* iclass = interface_method->method_holder();

        // get receiver
        int parms = entry->number_of_parameters();
        oop rcvr = STACK_OBJECT(-parms);
        CHECK_NULL(rcvr);
        InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();

        // Receiver subtype check against resolved interface klass (REFC).
        {
          Klass* refc = entry->interface_klass();
          itableOffsetEntry* scan;
          for (scan = (itableOffsetEntry*) int2->start_of_itable();
               scan->interface_klass() != nullptr;
               scan++) {
            if (scan->interface_klass() == refc) {
              break;
            }
          }
          // Check that the entry is non-null.  A null entry means
          // that the receiver class doesn't implement the
          // interface, and wasn't the same as when the caller was
          // compiled.
          if (scan->interface_klass() == nullptr) {
            VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
          }
        }

        // Find the declaring interface (iclass) in the receiver's itable.
        itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
        int i;
        for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
          if (ki->interface_klass() == iclass) break;
        }
        // If the interface isn't found, this class doesn't implement this
        // interface. The link resolver checks this but only for the first
        // time this interface is called.
        if (i == int2->itable_length()) {
          CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(THREAD, rcvr->klass(), iclass),
                  handle_exception);
        }
        int mindex = interface_method->itable_index();

        itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
        callee = im[mindex].method();
        // A null itable slot means the receiver never implemented/overrode
        // the method: abstract method error.
        if (callee == nullptr) {
          CALL_VM(InterpreterRuntime::throw_AbstractMethodErrorVerbose(THREAD, rcvr->klass(), interface_method),
                  handle_exception);
        }

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
        if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
        istate->set_bcp_advance(5);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
2383
      CASE(_invokevirtual):
      CASE(_invokespecial):
      CASE(_invokestatic): {
        u2 index = Bytes::get_native_u2(pc+1);

        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        if (!entry->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          // Re-fetch: resolution may have updated the entry.
          entry = cp->resolved_method_entry_at(index);
        }

        istate->set_msg(call_method);
        {
          Method* callee;
          if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
            // Receiver null check (receiver sits below the arguments).
            CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
            if (entry->is_vfinal()) {
              callee = entry->method();
              if (REWRITE_BYTECODES && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_archive()) {
                // Rewrite to _fast_invokevfinal.
                REWRITE_AT_PC(Bytecodes::_fast_invokevfinal);
              }
            } else {
              // get receiver
              int parms = entry->number_of_parameters();
              // this works but needs a resourcemark and seems to create a vtable on every call:
              // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
              //
              // this fails with an assert
              // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
              // but this works
              oop rcvr = STACK_OBJECT(-parms);
              VERIFY_OOP(rcvr);
              Klass* rcvrKlass = rcvr->klass();
              /*
                Executing this code in java.lang.String:
                    public String(char value[]) {
                          this.count = value.length;
                          this.value = (char[])value.clone();
                     }

                 a find on rcvr->klass() reports:
                 {type array char}{type array class}
                  - klass: {other class}

                  but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
                  because rcvr->klass()->is_instance_klass() == 0
                  However it seems to have a vtable in the right location. Huh?
                  Because vtables have the same offset for ArrayKlass and InstanceKlass.
              */
              callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
            }
          } else {
            // invokespecial/invokestatic: statically-bound callee from the
            // resolved entry. invokespecial still requires a non-null receiver.
            if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
              CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
            }
            callee = entry->method();
          }

          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
          if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
            // Debugger attached: force interpretation of the callee.
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
          istate->set_bcp_advance(3);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }
      }
2456
      /* Allocate memory for a new java object. */

      CASE(_newarray): {
        // Primitive array allocation: the element type is encoded as a
        // 1-byte BasicType operand (newarray is a 2-byte bytecode).
        BasicType atype = (BasicType) *(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Overwrite the size operand in place with the array ref (net TOS change: 0).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);

        UPDATE_PC_AND_CONTINUE(2);
      }

      /* Throw an exception. */

      CASE(_athrow): {
        oop except_oop = STACK_OBJECT(-1);
        // athrow on a null reference raises NullPointerException instead.
        CHECK_NULL(except_oop);
        // set pending_exception so we use common code
        THREAD->set_pending_exception(except_oop, nullptr, 0);
        goto handle_exception;
      }
2482
      /* goto and jsr. They are exactly the same except jsr pushes
       * the address of the next instruction first.
       */

      CASE(_jsr): {
        /* push bytecode index on stack */
        // Return address is the bci of the instruction after this 3-byte jsr.
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto):
      {
        // Signed 16-bit branch offset, relative to this instruction.
        int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        // Backward branches are safepoint/OSR poll points.
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }

      CASE(_jsr_w): {
        /* push return address on the stack */
        // Return address is the bci after this 5-byte jsr_w.
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto_w):
      {
        // Signed 32-bit branch offset, relative to this instruction.
        int32_t offset = Bytes::get_Java_u4(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }

      /* return from a jsr or jsr_w */

      CASE(_ret): {
        // The local holds the bci pushed by jsr/jsr_w; convert it back to a pc.
        pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
        UPDATE_PC_AND_CONTINUE(0);
      }
2525
      /* debugger breakpoint */

      CASE(_breakpoint): {
        Bytecodes::Code original_bytecode;
        // Flush interpreter state and set up a last-Java-frame manually
        // (instead of CALL_VM) so we can inspect the pending exception
        // before deciding to post the breakpoint event.
        DECACHE_STATE();
        SET_LAST_JAVA_FRAME();
        original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
                                                                         METHOD, pc);
        RESET_LAST_JAVA_FRAME();
        CACHE_STATE();
        if (THREAD->has_pending_exception()) goto handle_exception;
        // Post the JVMTI breakpoint event.
        CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
                                                handle_exception);

        // Re-dispatch on the bytecode the breakpoint replaced.
        opcode = (jubyte)original_bytecode;
        goto opcode_switch;
      }
2543
      // Quickened getfield bytecodes: the field entry is already resolved,
      // and the 2-byte index is native-endian (rewritten in place). All are
      // 3-byte bytecodes; they pop the object ref and push the field value.

      CASE(_fast_agetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        oop val;
        if (entry->is_flat()) {
          // Flat (inline-type) field: the VM materializes a heap copy.
          CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
          val = THREAD->vm_result_oop();
          THREAD->set_vm_result_oop(nullptr);
        } else {
          val = obj->obj_field(field_offset);
        }

        VERIFY_OOP(val);
        // Replace the object ref with the field value in place.
        SET_STACK_OBJECT(val, -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_bgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->byte_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_cgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->char_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_dgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        // Two-slot result: overwrite the ref slot and grow by one (net +1).
        SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
        MORE_STACK(1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_fgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_FLOAT(obj->float_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_igetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->int_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_lgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        // Two-slot result: overwrite the ref slot and grow by one (net +1).
        SET_STACK_LONG(obj->long_field(field_offset), 0);
        MORE_STACK(1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_sgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->short_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }
2667
      // Quickened putfield bytecodes. Stack layout: value on top, object ref
      // below it (ref at -2 for one-slot values, -3 for long/double). Each
      // pops both and advances past the 3-byte bytecode.

      CASE(_fast_aputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        oop val = STACK_OBJECT(-1);

        // Null-restricted inline-type fields reject null stores.
        if (entry->is_null_free_inline_type()) {
          CHECK_NULL_MSG(val, "Value is null");
        }

        if (entry->is_flat()) {
          // Flat (inline-type) field: the VM copies the value's fields in place.
          CALL_VM(InterpreterRuntime::write_flat_field(THREAD, obj, val, entry), handle_exception);
        } else {
          obj->obj_field_put(field_offset, val);
        }

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_bputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->byte_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_zputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        // Booleans are normalized to 0/1 on store.
        obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_cputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->char_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_dputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        // Two-slot value: object ref sits at -3.
        oop obj = STACK_OBJECT(-3);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->double_field_put(field_offset, STACK_DOUBLE(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
      }

      CASE(_fast_fputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->float_field_put(field_offset, STACK_FLOAT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_iputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->int_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_lputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        // Two-slot value: object ref sits at -3.
        oop obj = STACK_OBJECT(-3);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->long_field_put(field_offset, STACK_LONG(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
      }

      CASE(_fast_sputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->short_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }
2812
      CASE(_fast_aload_0): {
        // Quickened aload_0: push local 0 (1-byte bytecode).
        oop obj = LOCALS_OBJECT(0);
        VERIFY_OOP(obj);
        SET_STACK_OBJECT(obj, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      // Fused aload_0 + fast getfield bytecodes: the field index lives at
      // pc+2 (after the embedded getfield opcode byte), the whole sequence
      // is 4 bytes long, and the field of local 0 is pushed directly.

      CASE(_fast_aaccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        oop val;
        if (entry->is_flat()) {
          // Flat (inline-type) field: the VM materializes a heap copy.
          CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
          val = THREAD->vm_result_oop();
          THREAD->set_vm_result_oop(nullptr);
        } else {
          val = obj->obj_field(field_offset);
        }

        VERIFY_OOP(val);
        SET_STACK_OBJECT(val, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }

      CASE(_fast_iaccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->int_field(field_offset), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }

      CASE(_fast_faccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_FLOAT(obj->float_field(field_offset), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }
2874
      CASE(_fast_invokevfinal): {
        // Quickened invokevirtual for effectively-final targets: the callee
        // is statically known, so no vtable dispatch is needed.
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);

        assert(entry->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting");

        istate->set_msg(call_method);

        // Receiver null check (receiver sits below the arguments).
        CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
        Method* callee = entry->method();
        istate->set_callee(callee);
        if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
          // Debugger attached: force interpretation of the callee.
          istate->set_callee_entry_point(callee->interpreter_entry());
        } else {
          istate->set_callee_entry_point(callee->from_interpreted_entry());
        }
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0);
      }
2894
2895 DEFAULT:
2896 fatal("Unimplemented opcode %d = %s", opcode,
2897 Bytecodes::name((Bytecodes::Code)opcode));
2898 goto finish;
2899
2900 } /* switch(opc) */
2901
2902
2903 #ifdef USELABELS
2904 check_for_exception:
2905 #endif
2906 {
2907 if (!THREAD->has_pending_exception()) {
2908 CONTINUE;
2909 }
2910 /* We will be gcsafe soon, so flush our state. */
2911 DECACHE_PC();
2912 goto handle_exception;
2913 }
2914 do_continue: ;
2915
2916 } /* while (1) interpreter loop */
2917
2918
// An exception exists in the thread state see whether this activation can handle it
  handle_exception: {

    HandleMarkCleaner __hmc(THREAD);
    Handle except_oop(THREAD, THREAD->pending_exception());
    // Prevent any subsequent HandleMarkCleaner in the VM
    // from freeing the except_oop handle.
    HandleMark __hm(THREAD);

    THREAD->clear_pending_exception();
    assert(except_oop() != nullptr, "No exception to process");
    intptr_t continuation_bci;
    // expression stack is emptied
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
    // Ask the runtime for a handler in this method; a negative bci means
    // there is none and we must unwind. The call may itself throw (e.g.
    // during exception table lookup), hence the handle_exception target.
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);

    // The runtime may have replaced the exception (e.g. with a preallocated
    // one); re-read it from vm_result.
    except_oop = Handle(THREAD, THREAD->vm_result_oop());
    THREAD->set_vm_result_oop(nullptr);
    if (continuation_bci >= 0) {
      // Place exception on top of stack
      SET_STACK_OBJECT(except_oop(), 0);
      MORE_STACK(1);
      // Resume execution at the handler's bci.
      pc = METHOD->code_base() + continuation_bci;
      if (log_is_enabled(Info, exceptions)) {
        ResourceMark rm(THREAD);
        stringStream tempst;
        tempst.print("interpreter method <%s>\n"
                     " at bci %d, continuing at %d for thread " INTPTR_FORMAT,
                     METHOD->print_value_string(),
                     (int)(istate->bcp() - METHOD->code_base()),
                     (int)continuation_bci, p2i(THREAD));
        Exceptions::log_exception(except_oop, tempst.as_string());
      }
      // for AbortVMOnException flag
      Exceptions::debug_check_abort(except_oop);
      goto run;
    }
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
             " at bci %d, unwinding for thread " INTPTR_FORMAT,
             METHOD->print_value_string(),
             (int)(istate->bcp() - METHOD->code_base()),
             p2i(THREAD));
      Exceptions::log_exception(except_oop, tempst.as_string());
    }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(except_oop);

    // No handler in this activation, unwind and try again
    // Re-arm the pending exception so the caller's activation sees it.
    THREAD->set_pending_exception(except_oop(), nullptr, 0);
    goto handle_return;
  }  // handle_exception:
2974
2975 // Return from an interpreter invocation with the result of the interpretation
2976 // on the top of the Java Stack (or a pending exception)
2977
2978 handle_Pop_Frame: {
2979
2980 // We don't really do anything special here except we must be aware
2981 // that we can get here without ever locking the method (if sync).
2982 // Also we skip the notification of the exit.
2983
2984 istate->set_msg(popping_frame);
2985 // Clear pending so while the pop is in process
2986 // we don't start another one if a call_vm is done.
2987 THREAD->clear_popframe_condition();
2988 // Let interpreter (only) see the we're in the process of popping a frame
2989 THREAD->set_pop_frame_in_process();
2990
2991 goto handle_return;
2992
2993 } // handle_Pop_Frame
2994
2995 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2996 // given by the invoker of the early return.
2997 handle_Early_Return: {
2998
2999 istate->set_msg(early_return);
3000
3001 // Clear expression stack.
3002 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
3003
3004 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
3005
3006 // Push the value to be returned.
3007 switch (istate->method()->result_type()) {
3008 case T_BOOLEAN:
3009 case T_SHORT:
3010 case T_BYTE:
3011 case T_CHAR:
3012 case T_INT:
3013 SET_STACK_INT(ts->earlyret_value().i, 0);
3014 MORE_STACK(1);
3015 break;
3016 case T_LONG:
3017 SET_STACK_LONG(ts->earlyret_value().j, 1);
3018 MORE_STACK(2);
3019 break;
3020 case T_FLOAT:
3021 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
3022 MORE_STACK(1);
3023 break;
3024 case T_DOUBLE:
3025 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
3026 MORE_STACK(2);
3027 break;
3028 case T_ARRAY:
3029 case T_OBJECT:
3030 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
3031 MORE_STACK(1);
3032 break;
3033 default:
3034 ShouldNotReachHere();
3035 }
3036
3037 ts->clr_earlyret_value();
3038 ts->set_earlyret_oop(nullptr);
3039 ts->clr_earlyret_pending();
3040
3041 // Fall through to handle_return.
3042
3043 } // handle_Early_Return
3044
3045 handle_return: {
3046 // A storestore barrier is required to order initialization of
3047 // final fields with publishing the reference to the object that
3048 // holds the field. Without the barrier the value of final fields
3049 // can be observed to change.
3050 OrderAccess::storestore();
3051
3052 DECACHE_STATE();
3053
3054 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
3055 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
3056 Handle original_exception(THREAD, THREAD->pending_exception());
3057 Handle illegal_state_oop(THREAD, nullptr);
3058
3059 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
3060 // in any following VM entries from freeing our live handles, but illegal_state_oop
3061 // isn't really allocated yet and so doesn't become live until later and
3062 // in unpredictable places. Instead we must protect the places where we enter the
3063 // VM. It would be much simpler (and safer) if we could allocate a real handle with
3064 // a null oop in it and then overwrite the oop later as needed. This isn't
3065 // unfortunately isn't possible.
3066
3067 if (THREAD->has_pending_exception()) {
3068 THREAD->clear_pending_exception();
3069 }
3070
3071 //
3072 // As far as we are concerned we have returned. If we have a pending exception
3073 // that will be returned as this invocation's result. However if we get any
3074 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
3075 // will be our final result (i.e. monitor exception trumps a pending exception).
3076 //
3077
3078 // If we never locked the method (or really passed the point where we would have),
3079 // there is no need to unlock it (or look for other monitors), since that
3080 // could not have happened.
3081
3082 if (THREAD->do_not_unlock_if_synchronized()) {
3083
3084 // Never locked, reset the flag now because obviously any caller must
3085 // have passed their point of locking for us to have gotten here.
3086
3087 THREAD->set_do_not_unlock_if_synchronized(false);
3088 } else {
3089 // At this point we consider that we have returned. We now check that the
3090 // locks were properly block structured. If we find that they were not
3091 // used properly we will return with an illegal monitor exception.
3092 // The exception is checked by the caller not the callee since this
3093 // checking is considered to be part of the invocation and therefore
3094 // in the callers scope (JVM spec 8.13).
3095 //
3096 // Another weird thing to watch for is if the method was locked
3097 // recursively and then not exited properly. This means we must
3098 // examine all the entries in reverse time(and stack) order and
3099 // unlock as we find them. If we find the method monitor before
3100 // we are at the initial entry then we should throw an exception.
3101 // It is not clear the template based interpreter does this
3102 // correctly
3103
3104 BasicObjectLock* base = istate->monitor_base();
3105 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3106 bool method_unlock_needed = METHOD->is_synchronized();
3107 // We know the initial monitor was used for the method don't check that
3108 // slot in the loop
3109 if (method_unlock_needed) base--;
3110
3111 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
3112 while (end < base) {
3113 oop lockee = end->obj();
3114 if (lockee != nullptr) {
3115 InterpreterRuntime::monitorexit(end);
3116
3117 // One error is plenty
3118 if (illegal_state_oop() == nullptr && !suppress_error) {
3119 {
3120 // Prevent any HandleMarkCleaner from freeing our live handles
3121 HandleMark __hm(THREAD);
3122 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3123 }
3124 assert(THREAD->has_pending_exception(), "Lost our exception!");
3125 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3126 THREAD->clear_pending_exception();
3127 }
3128 }
3129 end++;
3130 }
3131 // Unlock the method if needed
3132 if (method_unlock_needed) {
3133 if (base->obj() == nullptr) {
3134 // The method is already unlocked this is not good.
3135 if (illegal_state_oop() == nullptr && !suppress_error) {
3136 {
3137 // Prevent any HandleMarkCleaner from freeing our live handles
3138 HandleMark __hm(THREAD);
3139 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3140 }
3141 assert(THREAD->has_pending_exception(), "Lost our exception!");
3142 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3143 THREAD->clear_pending_exception();
3144 }
3145 } else {
3146 //
3147 // The initial monitor is always used for the method
3148 // However if that slot is no longer the oop for the method it was unlocked
3149 // and reused by something that wasn't unlocked!
3150 //
3151 // deopt can come in with rcvr dead because c2 knows
3152 // its value is preserved in the monitor. So we can't use locals[0] at all
3153 // and must use first monitor slot.
3154 //
3155 oop rcvr = base->obj();
3156 if (rcvr == nullptr) {
3157 if (!suppress_error) {
3158 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
3159 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3160 THREAD->clear_pending_exception();
3161 }
3162 } else {
3163 InterpreterRuntime::monitorexit(base);
3164 if (THREAD->has_pending_exception()) {
3165 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3166 THREAD->clear_pending_exception();
3167 }
3168 }
3169 }
3170 }
3171 }
3172 // Clear the do_not_unlock flag now.
3173 THREAD->set_do_not_unlock_if_synchronized(false);
3174
3175 //
3176 // Notify jvmti/jvmdi
3177 //
3178 // NOTE: we do not notify a method_exit if we have a pending exception,
3179 // including an exception we generate for unlocking checks. In the former
3180 // case, JVMDI has already been notified by our call for the exception handler
3181 // and in both cases as far as JVMDI is concerned we have already returned.
3182 // If we notify it again JVMDI will be all confused about how many frames
3183 // are still on the stack (4340444).
3184 //
3185 // NOTE Further! It turns out the JVMTI spec in fact expects to see
3186 // method_exit events whenever we leave an activation unless it was done
3187 // for popframe. This is nothing like jvmdi. However we are passing the
3188 // tests at the moment (apparently because they are jvmdi based) so rather
3189 // than change this code and possibly fail tests we will leave it alone
3190 // (with this note) in anticipation of changing the vm and the tests
3191 // simultaneously.
3192
3193 suppress_exit_event = suppress_exit_event || illegal_state_oop() != nullptr;
3194
3195 // Whenever JVMTI puts a thread in interp_only_mode, method
3196 // entry/exit events are sent for that thread to track stack depth.
3197
3198 if (JVMTI_ENABLED && !suppress_exit_event && THREAD->is_interp_only_mode()) {
3199 // Prevent any HandleMarkCleaner from freeing our live handles
3200 HandleMark __hm(THREAD);
3201 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3202 }
3203
3204 //
3205 // See if we are returning any exception
3206 // A pending exception that was pending prior to a possible popping frame
3207 // overrides the popping frame.
3208 //
3209 assert(!suppress_error || (suppress_error && illegal_state_oop() == nullptr), "Error was not suppressed");
3210 if (illegal_state_oop() != nullptr || original_exception() != nullptr) {
3211 // Inform the frame manager we have no result.
3212 istate->set_msg(throwing_exception);
3213 if (illegal_state_oop() != nullptr)
3214 THREAD->set_pending_exception(illegal_state_oop(), nullptr, 0);
3215 else
3216 THREAD->set_pending_exception(original_exception(), nullptr, 0);
3217 UPDATE_PC_AND_RETURN(0);
3218 }
3219
3220 if (istate->msg() == popping_frame) {
3221 // Make it simpler on the assembly code and set the message for the frame pop.
3222 // returns
3223 if (istate->prev() == nullptr) {
3224 // We must be returning to a deoptimized frame (because popframe only happens between
3225 // two interpreted frames). We need to save the current arguments in C heap so that
3226 // the deoptimized frame when it restarts can copy the arguments to its expression
3227 // stack and re-execute the call. We also have to notify deoptimization that this
3228 // has occurred and to pick the preserved args copy them to the deoptimized frame's
3229 // java expression stack. Yuck.
3230 //
3231 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3232 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3233 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3234 }
3235 } else {
3236 istate->set_msg(return_from_method);
3237 }
3238
3239 // Normal return
3240 // Advance the pc and return to frame manager
3241 UPDATE_PC_AND_RETURN(1);
3242 } /* handle_return: */
3243
3244 // This is really a fatal error return
3245
3246 finish:
3247 DECACHE_TOS();
3248 DECACHE_PC();
3249
3250 return;
3251 }
3252
3253 // This constructor should only be used to construct the object to signal
3254 // interpreter initialization. All other instances should be created by
3255 // the frame manager.
3256 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3257 if (msg != initialize) ShouldNotReachHere();
3258 _msg = msg;
3259 _self_link = this;
3260 _prev_link = nullptr;
3261 }
3262
3263 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
3264 intptr_t* locals, int locals_offset) {
3265 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3266 locals[Interpreter::local_index_at(-locals_offset)] = value;
3267 }
3268
3269 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3270 int to_offset) {
3271 tos[Interpreter::expr_index_at(-to_offset)] =
3272 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3273 }
3274
3275 void BytecodeInterpreter::dup(intptr_t *tos) {
3276 copy_stack_slot(tos, -1, 0);
3277 }
3278
3279 void BytecodeInterpreter::dup2(intptr_t *tos) {
3280 copy_stack_slot(tos, -2, 0);
3281 copy_stack_slot(tos, -1, 1);
3282 }
3283
// dup_x1 bytecode helper: duplicate the top slot and insert the copy two
// slots down. Offsets are relative to the current top of stack (-1 is the
// top, 0 the first free slot — presumably made live by the caller's stack
// advance; confirm against the dispatch loop). The copy order below is
// significant: the last copy reads slot 0, written by the first copy.
void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);   // stash old top in the free slot above
  copy_stack_slot(tos, -2, -1);  // shift the second slot up by one
  copy_stack_slot(tos, 0, -2);   // drop the stashed top two slots down
}
3290
// dup_x2 bytecode helper: duplicate the top slot and insert the copy three
// slots down. Copy order is significant — the final copy reads slot 0,
// which the first copy wrote.
void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);   // stash old top in the free slot above
  copy_stack_slot(tos, -2, -1);  // shift second slot up
  copy_stack_slot(tos, -3, -2);  // shift third slot up
  copy_stack_slot(tos, 0, -3);   // drop the stashed top three slots down
}
// dup2_x1 bytecode helper: duplicate the top two slots and insert the pair
// three slots down. Copy order is significant — the last two copies read
// slots 1 and 0, written by the first two copies.
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);   // stash old top pair in the free slots
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);  // shift the displaced slot up
  copy_stack_slot(tos, 1, -2);   // drop the stashed pair into place
  copy_stack_slot(tos, 0, -3);
}
// dup2_x2 bytecode helper: duplicate the top two slots and insert the pair
// four slots down. Copy order is significant — the last two copies read
// slots 1 and 0, written by the first two copies.
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);   // stash old top pair in the free slots
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);  // shift the two displaced slots up
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);   // drop the stashed pair into place
  copy_stack_slot(tos, 0, -4);
}
3315
3316
3317 void BytecodeInterpreter::swap(intptr_t *tos) {
3318 // swap top two elements
3319 intptr_t val = tos[Interpreter::expr_index_at(1)];
3320 // Copy -2 entry to -1
3321 copy_stack_slot(tos, -2, -1);
3322 // Store saved -1 entry into -2
3323 tos[Interpreter::expr_index_at(2)] = val;
3324 }
3325 // --------------------------------------------------------------------------------
3326 // Non-product code
3327 #ifndef PRODUCT
3328
3329 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3330 switch (msg) {
3331 case BytecodeInterpreter::no_request: return("no_request");
3332 case BytecodeInterpreter::initialize: return("initialize");
3333 // status message to C++ interpreter
3334 case BytecodeInterpreter::method_entry: return("method_entry");
3335 case BytecodeInterpreter::method_resume: return("method_resume");
3336 case BytecodeInterpreter::got_monitors: return("got_monitors");
3337 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
3338 // requests to frame manager from C++ interpreter
3339 case BytecodeInterpreter::call_method: return("call_method");
3340 case BytecodeInterpreter::return_from_method: return("return_from_method");
3341 case BytecodeInterpreter::more_monitors: return("more_monitors");
3342 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
3343 case BytecodeInterpreter::popping_frame: return("popping_frame");
3344 case BytecodeInterpreter::do_osr: return("do_osr");
3345 // deopt
3346 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
3347 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
3348 default: return("BAD MSG");
3349 }
3350 }
// Dump every field of this interpreter-state object to the tty.
// Debugging aid for non-product builds (see PI() below for invoking it
// from a native debugger).
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    // name_and_sig_as_C_string() allocates in a resource area, so scope a
    // ResourceMark around it.
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}
3376
3377 extern "C" {
3378 void PI(uintptr_t arg) {
3379 ((BytecodeInterpreter*)arg)->print();
3380 }
3381 }
3382 #endif // PRODUCT