1 /*
2 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/cdsConfig.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "interpreter/bytecodeHistogram.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "interpreter/zero/bytecodeInterpreter.inline.hpp"
35 #include "jvm_io.h"
36 #include "logging/log.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "memory/universe.hpp"
39 #include "oops/constantPool.inline.hpp"
40 #include "oops/cpCache.inline.hpp"
41 #include "oops/instanceKlass.inline.hpp"
42 #include "oops/klass.inline.hpp"
43 #include "oops/method.inline.hpp"
44 #include "oops/methodCounters.hpp"
45 #include "oops/objArrayKlass.hpp"
46 #include "oops/objArrayOop.inline.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "oops/resolvedFieldEntry.hpp"
49 #include "oops/resolvedIndyEntry.hpp"
50 #include "oops/resolvedMethodEntry.hpp"
51 #include "oops/typeArrayOop.inline.hpp"
52 #include "prims/jvmtiExport.hpp"
53 #include "prims/jvmtiThreadState.hpp"
54 #include "runtime/atomicAccess.hpp"
55 #include "runtime/basicLock.inline.hpp"
56 #include "runtime/frame.inline.hpp"
57 #include "runtime/globals.hpp"
58 #include "runtime/handles.inline.hpp"
59 #include "runtime/interfaceSupport.inline.hpp"
60 #include "runtime/orderAccess.hpp"
61 #include "runtime/sharedRuntime.hpp"
62 #include "utilities/debug.hpp"
63 #include "utilities/exceptions.hpp"
64 #include "utilities/globalDefinitions.hpp"
65 #include "utilities/macros.hpp"
66
67 /*
68 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
70 * gives us the opportunity to have the instructions that calculate the
71 * next opcode to jump to be intermixed with the rest of the instructions
72 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
73 */
74 #undef USELABELS
75 #ifdef __GNUC__
76 /*
77 ASSERT signifies debugging. It is much easier to step thru bytecodes if we
78 don't use the computed goto approach.
79 */
80 #ifndef ASSERT
81 #define USELABELS
82 #endif
83 #endif
84
85 #undef CASE
86 #ifdef USELABELS
87 #define CASE(opcode) opc ## opcode
88 #define DEFAULT opc_default
89 #else
90 #define CASE(opcode) case Bytecodes:: opcode
91 #define DEFAULT default
92 #endif
93
94 /*
95 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
97 * the top of the while loop handle it. This provides a better opportunity
98 * for instruction scheduling. Some compilers just do this prefetch
99 * automatically. Some actually end up with worse performance if you
100 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
101 */
102 #undef PREFETCH_OPCCODE
103 #define PREFETCH_OPCCODE
104
// Deliberately empty VM entry used by RETURN_SAFEPOINT: the JRT_ENTRY
// thread-state transition itself performs the safepoint/handshake
// processing, so no body is needed.
JRT_ENTRY(void, at_safepoint(JavaThread* current)) {}
JRT_END
107
108 /*
109 Interpreter safepoint: it is expected that the interpreter will have no live
110 handles of its own creation live at an interpreter safepoint. Therefore we
111 run a HandleMarkCleaner and trash all handles allocated in the call chain
112 since the JavaCalls::call_helper invocation that initiated the chain.
113 There really shouldn't be any handles remaining to trash but this is cheap
114 in relation to a safepoint.
115 */
116 #define RETURN_SAFEPOINT \
117 if (SafepointMechanism::should_process(THREAD)) { \
118 CALL_VM(at_safepoint(THREAD), handle_exception); \
119 }
120
121 /*
122 * VM_JAVA_ERROR - Macro for throwing a java exception from
123 * the interpreter loop. Should really be a CALL_VM but there
124 * is no entry point to do the transition to vm so we just
125 * do it by hand here.
126 */
127 #define VM_JAVA_ERROR_NO_JUMP(name, msg) \
128 DECACHE_STATE(); \
129 SET_LAST_JAVA_FRAME(); \
130 { \
131 ThreadInVMfromJava trans(THREAD); \
132 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
133 } \
134 RESET_LAST_JAVA_FRAME(); \
135 CACHE_STATE();
136
137 // Normal throw of a java error.
138 #define VM_JAVA_ERROR(name, msg) \
139 VM_JAVA_ERROR_NO_JUMP(name, msg) \
140 goto handle_exception;
141
142 #ifdef PRODUCT
143 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
144 #else
145 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
146 { \
147 if (PrintBytecodeHistogram) { \
148 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
149 } \
150 if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) { \
151 BytecodeCounter::_counter_value++; \
152 if (StopInterpreterAt == BytecodeCounter::_counter_value) { \
153 os::breakpoint(); \
154 } \
155 if (TraceBytecodes) { \
156 CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \
157 topOfStack[Interpreter::expr_index_at(1)], \
158 topOfStack[Interpreter::expr_index_at(2)]), \
159 handle_exception); \
160 } \
161 } \
162 }
163 #endif
164
165 #undef DEBUGGER_SINGLE_STEP_NOTIFY
166 #if INCLUDE_JVMTI
167 /* NOTE: (kbr) This macro must be called AFTER the PC has been
168 incremented. JvmtiExport::at_single_stepping_point() may cause a
169 breakpoint opcode to get inserted at the current PC to allow the
170 debugger to coalesce single-step events.
171
172 As a result if we call at_single_stepping_point() we refetch opcode
173 to get the current opcode. This will override any other prefetching
174 that might have occurred.
175 */
176 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
177 { \
178 if (JVMTI_ENABLED && JvmtiExport::should_post_single_step()) { \
179 DECACHE_STATE(); \
180 SET_LAST_JAVA_FRAME(); \
181 ThreadInVMfromJava trans(THREAD); \
182 JvmtiExport::at_single_stepping_point(THREAD, \
183 istate->method(), \
184 pc); \
185 RESET_LAST_JAVA_FRAME(); \
186 CACHE_STATE(); \
187 if (THREAD->has_pending_popframe() && \
188 !THREAD->pop_frame_in_process()) { \
189 goto handle_Pop_Frame; \
190 } \
191 if (THREAD->jvmti_thread_state() && \
192 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
193 goto handle_Early_Return; \
194 } \
195 opcode = *pc; \
196 } \
197 }
198 #else
199 #define DEBUGGER_SINGLE_STEP_NOTIFY()
200 #endif // INCLUDE_JVMTI
201
202 /*
203 * CONTINUE - Macro for executing the next opcode.
204 */
205 #undef CONTINUE
206 #ifdef USELABELS
207 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
209 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
210 #define CONTINUE { \
211 opcode = *pc; \
212 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
213 DEBUGGER_SINGLE_STEP_NOTIFY(); \
214 DISPATCH(opcode); \
215 }
216 #else
217 #ifdef PREFETCH_OPCCODE
218 #define CONTINUE { \
219 opcode = *pc; \
220 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
221 DEBUGGER_SINGLE_STEP_NOTIFY(); \
222 continue; \
223 }
224 #else
225 #define CONTINUE { \
226 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
227 DEBUGGER_SINGLE_STEP_NOTIFY(); \
228 continue; \
229 }
230 #endif
231 #endif
232
233
234 #define UPDATE_PC(opsize) {pc += opsize; }
235 /*
236 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
237 */
238 #undef UPDATE_PC_AND_TOS
239 #define UPDATE_PC_AND_TOS(opsize, stack) \
240 {pc += opsize; MORE_STACK(stack); }
241
242 /*
243 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
244 * and executing the next opcode. It's somewhat similar to the combination
245 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
246 */
247 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
248 #ifdef USELABELS
249 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
250 pc += opsize; opcode = *pc; MORE_STACK(stack); \
251 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
252 DEBUGGER_SINGLE_STEP_NOTIFY(); \
253 DISPATCH(opcode); \
254 }
255
256 #define UPDATE_PC_AND_CONTINUE(opsize) { \
257 pc += opsize; opcode = *pc; \
258 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
259 DEBUGGER_SINGLE_STEP_NOTIFY(); \
260 DISPATCH(opcode); \
261 }
262 #else
263 #ifdef PREFETCH_OPCCODE
264 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
265 pc += opsize; opcode = *pc; MORE_STACK(stack); \
266 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
267 DEBUGGER_SINGLE_STEP_NOTIFY(); \
268 goto do_continue; \
269 }
270
271 #define UPDATE_PC_AND_CONTINUE(opsize) { \
272 pc += opsize; opcode = *pc; \
273 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
274 DEBUGGER_SINGLE_STEP_NOTIFY(); \
275 goto do_continue; \
276 }
277 #else
278 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
279 pc += opsize; MORE_STACK(stack); \
280 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
281 DEBUGGER_SINGLE_STEP_NOTIFY(); \
282 goto do_continue; \
283 }
284
285 #define UPDATE_PC_AND_CONTINUE(opsize) { \
286 pc += opsize; \
287 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
288 DEBUGGER_SINGLE_STEP_NOTIFY(); \
289 goto do_continue; \
290 }
291 #endif /* PREFETCH_OPCCODE */
292 #endif /* USELABELS */
293
// About to call a new method: save the adjusted pc and return to the frame manager
295 #define UPDATE_PC_AND_RETURN(opsize) \
296 DECACHE_TOS(); \
297 istate->set_bcp(pc+opsize); \
298 return;
299
// Overwrite the bytecode at the current pc, used when quickening a
// bytecode to one of its _fast_* variants.
#define REWRITE_AT_PC(val) \
    *pc = val;

#define METHOD istate->method()
// This interpreter build does not maintain invocation/backedge counters or
// trigger compilation, so these are compiled away to no-ops.
#define GET_METHOD_COUNTERS(res)
#define DO_BACKEDGE_CHECKS(skip, branch_pc)
306
307 /*
308 * For those opcodes that need to have a GC point on a backwards branch
309 */
310
311 /*
312 * Macros for caching and flushing the interpreter state. Some local
313 * variables need to be flushed out to the frame before we do certain
314 * things (like pushing frames or becoming gc safe) and some need to
315 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
317 * optimal because we don't always need to cache or decache everything
318 * because some things we know are already cached or decached.
319 */
320 #undef DECACHE_TOS
321 #undef CACHE_TOS
322 #undef CACHE_PREV_TOS
323 #define DECACHE_TOS() istate->set_stack(topOfStack);
324
325 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
326
327 #undef DECACHE_PC
328 #undef CACHE_PC
329 #define DECACHE_PC() istate->set_bcp(pc);
330 #define CACHE_PC() pc = istate->bcp();
331 #define CACHE_CP() cp = istate->constants();
332 #define CACHE_LOCALS() locals = istate->locals();
333 #undef CACHE_FRAME
334 #define CACHE_FRAME()
335
336 // BCI() returns the current bytecode-index.
337 #undef BCI
338 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
339
340 /*
341 * CHECK_NULL - Macro for throwing a NullPointerException if the object
342 * passed is a null ref.
343 * On some architectures/platforms it should be possible to do this implicitly
344 */
345 #undef CHECK_NULL
346 #define CHECK_NULL_MSG(obj_, msg) \
347 if ((obj_) == nullptr) { \
348 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), (msg)); \
349 } \
350 VERIFY_OOP(obj_)
351 #define CHECK_NULL(obj_) CHECK_NULL_MSG(obj_, nullptr)
352
#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
// The long constants are built arithmetically from max_jlong, presumably to
// avoid 64-bit literal suffix portability issues on old compilers -- TODO confirm.
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
// Round val up to the next 4-byte boundary.
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
362
363 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
364 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
365
366 // Reload interpreter state after calling the VM or a possible GC
367 #define CACHE_STATE() \
368 CACHE_TOS(); \
369 CACHE_PC(); \
370 CACHE_CP(); \
371 CACHE_LOCALS();
372
373 // Call the VM with last java frame only.
374 #define CALL_VM_NAKED_LJF(func) \
375 DECACHE_STATE(); \
376 SET_LAST_JAVA_FRAME(); \
377 func; \
378 RESET_LAST_JAVA_FRAME(); \
379 CACHE_STATE();
380
381 // Call the VM. Don't check for pending exceptions.
382 #define CALL_VM_NOCHECK(func) \
383 CALL_VM_NAKED_LJF(func) \
384 if (THREAD->has_pending_popframe() && \
385 !THREAD->pop_frame_in_process()) { \
386 goto handle_Pop_Frame; \
387 } \
388 if (THREAD->jvmti_thread_state() && \
389 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
390 goto handle_Early_Return; \
391 }
392
393 // Call the VM and check for pending exceptions
394 #define CALL_VM(func, label) { \
395 CALL_VM_NOCHECK(func); \
396 if (THREAD->has_pending_exception()) goto label; \
397 }
398
399 #define MAYBE_POST_FIELD_ACCESS(obj) { \
400 if (JVMTI_ENABLED) { \
401 int* count_addr; \
402 /* Check to see if a field modification watch has been set */ \
403 /* before we take the time to call into the VM. */ \
404 count_addr = (int*)JvmtiExport::get_field_access_count_addr(); \
405 if (*count_addr > 0) { \
406 oop target; \
407 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { \
408 target = nullptr; \
409 } else { \
410 target = obj; \
411 } \
412 CALL_VM(InterpreterRuntime::post_field_access(THREAD, \
413 target, entry), \
414 handle_exception); \
415 } \
416 } \
417 }
418
419 #define MAYBE_POST_FIELD_MODIFICATION(obj) { \
420 if (JVMTI_ENABLED) { \
421 int* count_addr; \
422 /* Check to see if a field modification watch has been set */ \
423 /* before we take the time to call into the VM. */ \
424 count_addr = (int*)JvmtiExport::get_field_modification_count_addr(); \
425 if (*count_addr > 0) { \
426 oop target; \
427 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { \
428 target = nullptr; \
429 } else { \
430 target = obj; \
431 } \
432 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, \
433 target, entry, \
434 (jvalue*)STACK_SLOT(-1)), \
435 handle_exception); \
436 } \
437 } \
438 }
439
440 static inline int fast_get_type(TosState tos) {
441 switch (tos) {
442 case ztos:
443 case btos: return Bytecodes::_fast_bgetfield;
444 case ctos: return Bytecodes::_fast_cgetfield;
445 case stos: return Bytecodes::_fast_sgetfield;
446 case itos: return Bytecodes::_fast_igetfield;
447 case ltos: return Bytecodes::_fast_lgetfield;
448 case ftos: return Bytecodes::_fast_fgetfield;
449 case dtos: return Bytecodes::_fast_dgetfield;
450 case atos: return Bytecodes::_fast_agetfield;
451 default:
452 ShouldNotReachHere();
453 return -1;
454 }
455 }
456
457 static inline int fast_put_type(TosState tos) {
458 switch (tos) {
459 case ztos: return Bytecodes::_fast_zputfield;
460 case btos: return Bytecodes::_fast_bputfield;
461 case ctos: return Bytecodes::_fast_cputfield;
462 case stos: return Bytecodes::_fast_sputfield;
463 case itos: return Bytecodes::_fast_iputfield;
464 case ltos: return Bytecodes::_fast_lputfield;
465 case ftos: return Bytecodes::_fast_fputfield;
466 case dtos: return Bytecodes::_fast_dputfield;
467 case atos: return Bytecodes::_fast_aputfield;
468 default:
469 ShouldNotReachHere();
470 return -1;
471 }
472 }
473
474 /*
475 * BytecodeInterpreter::run(interpreterState istate)
476 *
477 * The real deal. This is where byte codes actually get interpreted.
478 * Basically it's a big while loop that iterates until we return from
479 * the method passed in.
480 */
481
482 // Instantiate variants of the method for future linking.
483 template void BytecodeInterpreter::run<false, false>(interpreterState istate);
484 template void BytecodeInterpreter::run<false, true>(interpreterState istate);
485 template void BytecodeInterpreter::run< true, false>(interpreterState istate);
486 template void BytecodeInterpreter::run< true, true>(interpreterState istate);
487
488 template<bool JVMTI_ENABLED, bool REWRITE_BYTECODES>
489 void BytecodeInterpreter::run(interpreterState istate) {
490 intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
491 address pc = istate->bcp();
492 jubyte opcode;
493 intptr_t* locals = istate->locals();
494 ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
495 #ifdef LOTS_OF_REGS
496 JavaThread* THREAD = istate->thread();
497 #else
498 #undef THREAD
499 #define THREAD istate->thread()
500 #endif
501
502 #ifdef ASSERT
503 assert(labs(istate->stack_base() - istate->stack_limit()) == (istate->method()->max_stack() + 1),
504 "Bad stack limit");
505 /* QQQ this should be a stack method so we don't know actual direction */
506 assert(topOfStack >= istate->stack_limit() && topOfStack < istate->stack_base(),
507 "Stack top out of range");
508
509 // Verify linkages.
510 interpreterState l = istate;
511 do {
512 assert(l == l->_self_link, "bad link");
513 l = l->_prev_link;
514 } while (l != nullptr);
515 // Screwups with stack management usually cause us to overwrite istate
516 // save a copy so we can verify it.
517 interpreterState orig = istate;
518 #endif
519
520 #ifdef USELABELS
521 const static void* const opclabels_data[256] = {
522 /* 0x00 */ &&opc_nop, &&opc_aconst_null, &&opc_iconst_m1, &&opc_iconst_0,
523 /* 0x04 */ &&opc_iconst_1, &&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
524 /* 0x08 */ &&opc_iconst_5, &&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
525 /* 0x0C */ &&opc_fconst_1, &&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
526
527 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
528 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
529 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0, &&opc_iload_1,
530 /* 0x1C */ &&opc_iload_2, &&opc_iload_3, &&opc_lload_0, &&opc_lload_1,
531
532 /* 0x20 */ &&opc_lload_2, &&opc_lload_3, &&opc_fload_0, &&opc_fload_1,
533 /* 0x24 */ &&opc_fload_2, &&opc_fload_3, &&opc_dload_0, &&opc_dload_1,
534 /* 0x28 */ &&opc_dload_2, &&opc_dload_3, &&opc_aload_0, &&opc_aload_1,
535 /* 0x2C */ &&opc_aload_2, &&opc_aload_3, &&opc_iaload, &&opc_laload,
536
537 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
538 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
539 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
540 /* 0x3C */ &&opc_istore_1, &&opc_istore_2, &&opc_istore_3, &&opc_lstore_0,
541
542 /* 0x40 */ &&opc_lstore_1, &&opc_lstore_2, &&opc_lstore_3, &&opc_fstore_0,
543 /* 0x44 */ &&opc_fstore_1, &&opc_fstore_2, &&opc_fstore_3, &&opc_dstore_0,
544 /* 0x48 */ &&opc_dstore_1, &&opc_dstore_2, &&opc_dstore_3, &&opc_astore_0,
545 /* 0x4C */ &&opc_astore_1, &&opc_astore_2, &&opc_astore_3, &&opc_iastore,
546
547 /* 0x50 */ &&opc_lastore, &&opc_fastore, &&opc_dastore, &&opc_aastore,
548 /* 0x54 */ &&opc_bastore, &&opc_castore, &&opc_sastore, &&opc_pop,
549 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
550 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1, &&opc_dup2_x2, &&opc_swap,
551
552 /* 0x60 */ &&opc_iadd, &&opc_ladd, &&opc_fadd, &&opc_dadd,
553 /* 0x64 */ &&opc_isub, &&opc_lsub, &&opc_fsub, &&opc_dsub,
554 /* 0x68 */ &&opc_imul, &&opc_lmul, &&opc_fmul, &&opc_dmul,
555 /* 0x6C */ &&opc_idiv, &&opc_ldiv, &&opc_fdiv, &&opc_ddiv,
556
557 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem, &&opc_drem,
558 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg, &&opc_dneg,
559 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr, &&opc_lshr,
560 /* 0x7C */ &&opc_iushr, &&opc_lushr, &&opc_iand, &&opc_land,
561
562 /* 0x80 */ &&opc_ior, &&opc_lor, &&opc_ixor, &&opc_lxor,
563 /* 0x84 */ &&opc_iinc, &&opc_i2l, &&opc_i2f, &&opc_i2d,
564 /* 0x88 */ &&opc_l2i, &&opc_l2f, &&opc_l2d, &&opc_f2i,
565 /* 0x8C */ &&opc_f2l, &&opc_f2d, &&opc_d2i, &&opc_d2l,
566
567 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
568 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl, &&opc_fcmpg, &&opc_dcmpl,
569 /* 0x98 */ &&opc_dcmpg, &&opc_ifeq, &&opc_ifne, &&opc_iflt,
570 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
571
572 /* 0xA0 */ &&opc_if_icmpne, &&opc_if_icmplt, &&opc_if_icmpge, &&opc_if_icmpgt,
573 /* 0xA4 */ &&opc_if_icmple, &&opc_if_acmpeq, &&opc_if_acmpne, &&opc_goto,
574 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch, &&opc_lookupswitch,
575 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
576
577 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
578 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual, &&opc_invokespecial,
579 /* 0xB8 */ &&opc_invokestatic, &&opc_invokeinterface, &&opc_invokedynamic, &&opc_new,
580 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
581
582 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
583 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
584 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_fast_agetfield,
585 /* 0xCC */ &&opc_default, &&opc_fast_bgetfield, &&opc_fast_cgetfield, &&opc_fast_dgetfield,
586
587 /* 0xD0 */ &&opc_fast_fgetfield, &&opc_fast_igetfield, &&opc_fast_lgetfield, &&opc_fast_sgetfield,
588 /* 0xD4 */ &&opc_fast_aputfield, &&opc_default, &&opc_fast_bputfield, &&opc_fast_zputfield,
589 /* 0xD8 */ &&opc_fast_cputfield, &&opc_fast_dputfield, &&opc_fast_fputfield, &&opc_fast_iputfield,
590 /* 0xDC */ &&opc_fast_lputfield, &&opc_fast_sputfield, &&opc_fast_aload_0, &&opc_fast_iaccess_0,
591
592 /* 0xE0 */ &&opc_fast_aaccess_0, &&opc_fast_faccess_0, &&opc_fast_iload, &&opc_fast_iload2,
593 /* 0xE4 */ &&opc_fast_icaload, &&opc_fast_invokevfinal, &&opc_default, &&opc_default,
594 /* 0xE8 */ &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, &&opc_invokehandle,
595 /* 0xEC */ &&opc_nofast_getfield, &&opc_nofast_putfield, &&opc_nofast_aload_0, &&opc_nofast_iload,
596
597 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
598 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
599 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
600 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
601 };
602 uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
603 #endif /* USELABELS */
604
605 switch (istate->msg()) {
606 case initialize: {
607 ShouldNotCallThis();
608 return;
609 }
610 case method_entry: {
611 THREAD->set_do_not_unlock_if_synchronized(true);
612
613 // Lock method if synchronized.
614 if (METHOD->is_synchronized()) {
615 // oop rcvr = locals[0].j.r;
616 oop rcvr;
617 if (METHOD->is_static()) {
618 rcvr = METHOD->constants()->pool_holder()->java_mirror();
619 } else {
620 rcvr = LOCALS_OBJECT(0);
621 VERIFY_OOP(rcvr);
622 }
623
624 // The initial monitor is ours for the taking.
625 BasicObjectLock* mon = &istate->monitor_base()[-1];
626 mon->set_obj(rcvr);
627 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
628 }
629 THREAD->set_do_not_unlock_if_synchronized(false);
630
631 // Notify jvmti.
632 // Whenever JVMTI puts a thread in interp_only_mode, method
633 // entry/exit events are sent for that thread to track stack depth.
634 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
635 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
636 handle_exception);
637 }
638
639 goto run;
640 }
641
642 case popping_frame: {
643 // returned from a java call to pop the frame, restart the call
644 // clear the message so we don't confuse ourselves later
645 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
646 istate->set_msg(no_request);
647 THREAD->clr_pop_frame_in_process();
648 goto run;
649 }
650
651 case method_resume: {
652 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
653 // resume
654 os::breakpoint();
655 }
656 // returned from a java call, continue executing.
657 if (THREAD->has_pending_popframe() && !THREAD->pop_frame_in_process()) {
658 goto handle_Pop_Frame;
659 }
660 if (THREAD->jvmti_thread_state() &&
661 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
662 goto handle_Early_Return;
663 }
664
665 if (THREAD->has_pending_exception()) goto handle_exception;
666 // Update the pc by the saved amount of the invoke bytecode size
667 UPDATE_PC(istate->bcp_advance());
668 goto run;
669 }
670
671 case deopt_resume2: {
672 // Returned from an opcode that will reexecute. Deopt was
673 // a result of a PopFrame request.
674 //
675 goto run;
676 }
677
678 case deopt_resume: {
679 // Returned from an opcode that has completed. The stack has
680 // the result all we need to do is skip across the bytecode
681 // and continue (assuming there is no exception pending)
682 //
683 // compute continuation length
684 //
685 // Note: it is possible to deopt at a return_register_finalizer opcode
686 // because this requires entering the vm to do the registering. While the
687 // opcode is complete we can't advance because there are no more opcodes
      // much like trying to deopt at a poll return. In that case we simply
689 // get out of here
690 //
691 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
692 // this will do the right thing even if an exception is pending.
693 goto handle_return;
694 }
695 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
696 if (THREAD->has_pending_exception()) goto handle_exception;
697 goto run;
698 }
699 case got_monitors: {
700 // continue locking now that we have a monitor to use
701 // we expect to find newly allocated monitor at the "top" of the monitor stack.
702 oop lockee = STACK_OBJECT(-1);
703 VERIFY_OOP(lockee);
704 // derefing's lockee ought to provoke implicit null check
705 // find a free monitor
706 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
707 assert(entry->obj() == nullptr, "Frame manager didn't allocate the monitor");
708 entry->set_obj(lockee);
709 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
710 UPDATE_PC_AND_TOS(1, -1);
711 goto run;
712 }
713 default: {
714 fatal("Unexpected message from frame manager");
715 }
716 }
717
718 run:
719
720 DO_UPDATE_INSTRUCTION_COUNT(*pc)
721 DEBUGGER_SINGLE_STEP_NOTIFY();
722 #ifdef PREFETCH_OPCCODE
723 opcode = *pc; /* prefetch first opcode */
724 #endif
725
726 #ifndef USELABELS
727 while (1)
728 #endif
729 {
730 #ifndef PREFETCH_OPCCODE
731 opcode = *pc;
732 #endif
733 // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
735 // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using these labels avoids double breakpoints when quickening and
737 * when returning from transition frames.
738 */
739 opcode_switch:
740 assert(istate == orig, "Corrupted istate");
741 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
742 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
743 assert(topOfStack < istate->stack_base(), "Stack underrun");
744
745 #ifdef USELABELS
746 DISPATCH(opcode);
747 #else
748 switch (opcode)
749 #endif
750 {
751 CASE(_nop):
752 UPDATE_PC_AND_CONTINUE(1);
753
754 /* Push miscellaneous constants onto the stack. */
755
756 CASE(_aconst_null):
757 SET_STACK_OBJECT(nullptr, 0);
758 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
759
760 #undef OPC_CONST_n
761 #define OPC_CONST_n(opcode, const_type, value) \
762 CASE(opcode): \
763 SET_STACK_ ## const_type(value, 0); \
764 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
765
766 OPC_CONST_n(_iconst_m1, INT, -1);
767 OPC_CONST_n(_iconst_0, INT, 0);
768 OPC_CONST_n(_iconst_1, INT, 1);
769 OPC_CONST_n(_iconst_2, INT, 2);
770 OPC_CONST_n(_iconst_3, INT, 3);
771 OPC_CONST_n(_iconst_4, INT, 4);
772 OPC_CONST_n(_iconst_5, INT, 5);
773 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
774 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
775 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
776
777 #undef OPC_CONST2_n
778 #define OPC_CONST2_n(opcname, value, key, kind) \
779 CASE(_##opcname): \
780 { \
781 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
782 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
783 }
784 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
785 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
786 OPC_CONST2_n(lconst_0, Zero, long, LONG);
787 OPC_CONST2_n(lconst_1, One, long, LONG);
788
789 /* Load constant from constant pool: */
790
791 /* Push a 1-byte signed integer value onto the stack. */
792 CASE(_bipush):
793 SET_STACK_INT((jbyte)(pc[1]), 0);
794 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
795
796 /* Push a 2-byte signed integer constant onto the stack. */
797 CASE(_sipush):
798 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
799 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
800
801 /* load from local variable */
802
803 CASE(_aload):
804 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
805 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
806 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
807
808 CASE(_iload):
809 {
810 if (REWRITE_BYTECODES) {
811 // Attempt to rewrite iload, iload -> fast_iload2
812 // iload, caload -> fast_icaload
813 // Normal iloads will be rewritten to fast_iload to avoid checking again.
814 switch (*(pc + 2)) {
815 case Bytecodes::_fast_iload:
816 REWRITE_AT_PC(Bytecodes::_fast_iload2);
817 break;
818 case Bytecodes::_caload:
819 REWRITE_AT_PC(Bytecodes::_fast_icaload);
820 break;
821 case Bytecodes::_iload:
822 // Wait until rewritten to _fast_iload.
823 break;
824 default:
825 // Last iload in a (potential) series, don't check again.
826 REWRITE_AT_PC(Bytecodes::_fast_iload);
827 }
828 }
829 // Normal iload handling.
830 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
831 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
832 }
833
834 CASE(_nofast_iload):
835 {
836 // Normal, non-rewritable iload handling.
837 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
838 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
839 }
840
841 CASE(_fast_iload):
842 CASE(_fload):
843 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
844 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
845
846 CASE(_fast_iload2):
847 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
848 SET_STACK_SLOT(LOCALS_SLOT(pc[3]), 1);
849 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
850
851 CASE(_lload):
852 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
853 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
854
855 CASE(_dload):
856 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
857 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
858
859 #undef OPC_LOAD_n
860 #define OPC_LOAD_n(num) \
861 CASE(_iload_##num): \
862 CASE(_fload_##num): \
863 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
864 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
865 \
866 CASE(_lload_##num): \
867 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
868 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
869 CASE(_dload_##num): \
870 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
871 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
872
873 OPC_LOAD_n(0);
874 OPC_LOAD_n(1);
875 OPC_LOAD_n(2);
876 OPC_LOAD_n(3);
877
878 #undef OPC_ALOAD_n
879 #define OPC_ALOAD_n(num) \
880 CASE(_aload_##num): { \
881 oop obj = LOCALS_OBJECT(num); \
882 VERIFY_OOP(obj); \
883 SET_STACK_OBJECT(obj, 0); \
884 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
885 }
886
      CASE(_aload_0):
      {
        /* Maybe rewrite if following bytecode is one of the supported _fast_Xgetfield bytecodes. */
        // Fuses "aload_0; _fast_Xgetfield" into a single _fast_Xaccess_0
        // bytecode; any other successor degrades aload_0 to _fast_aload_0.
        if (REWRITE_BYTECODES) {
          switch (*(pc + 1)) {
            case Bytecodes::_fast_agetfield:
              REWRITE_AT_PC(Bytecodes::_fast_aaccess_0);
              break;
            case Bytecodes::_fast_fgetfield:
              REWRITE_AT_PC(Bytecodes::_fast_faccess_0);
              break;
            case Bytecodes::_fast_igetfield:
              REWRITE_AT_PC(Bytecodes::_fast_iaccess_0);
              break;
            case Bytecodes::_getfield:
            case Bytecodes::_nofast_getfield: {
              /* Otherwise, do nothing here, wait until/if it gets rewritten to _fast_Xgetfield.
               * Unfortunately, this punishes volatile field access, because it never gets
               * rewritten. */
              break;
            }
            default:
              REWRITE_AT_PC(Bytecodes::_fast_aload_0);
              break;
          }
        }
        // Normal aload_0 handling. Executes once before the rewrite takes
        // effect on subsequent passes over this pc.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
918
      CASE(_nofast_aload_0):
      {
        // Normal, non-rewritable aload_0 handling.
        // Identical to the tail of _aload_0, minus the rewrite attempt.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
926
927 OPC_ALOAD_n(1);
928 OPC_ALOAD_n(2);
929 OPC_ALOAD_n(3);
930
931 /* store to a local variable */
932
      CASE(_astore):
        // NOTE(review): astore goes through a helper rather than a plain
        // object store — per JVMS, astore must also accept returnAddress
        // values produced by jsr; confirm against the helper's definition.
        astore(topOfStack, -1, locals, pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
        // Single-slot store: int and float share the raw-slot path.
        SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
        // Two-slot stores pop two stack slots into the local pair at pc[1].
        SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
        SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
949
950 CASE(_wide): {
951 uint16_t reg = Bytes::get_Java_u2(pc + 2);
952
953 opcode = pc[1];
954
955 // Wide and it's sub-bytecode are counted as separate instructions. If we
956 // don't account for this here, the bytecode trace skips the next bytecode.
957 DO_UPDATE_INSTRUCTION_COUNT(opcode);
958
959 switch(opcode) {
960 case Bytecodes::_aload:
961 VERIFY_OOP(LOCALS_OBJECT(reg));
962 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
963 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
964
965 case Bytecodes::_iload:
966 case Bytecodes::_fload:
967 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
968 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
969
970 case Bytecodes::_lload:
971 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
972 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
973
974 case Bytecodes::_dload:
975 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
976 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
977
978 case Bytecodes::_astore:
979 astore(topOfStack, -1, locals, reg);
980 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
981
982 case Bytecodes::_istore:
983 case Bytecodes::_fstore:
984 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
985 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
986
987 case Bytecodes::_lstore:
988 SET_LOCALS_LONG(STACK_LONG(-1), reg);
989 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
990
991 case Bytecodes::_dstore:
992 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
993 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
994
995 case Bytecodes::_iinc: {
996 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
997 // Be nice to see what this generates.... QQQ
998 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
999 UPDATE_PC_AND_CONTINUE(6);
1000 }
1001 case Bytecodes::_ret:
1002 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1003 UPDATE_PC_AND_CONTINUE(0);
1004 default:
1005 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
1006 }
1007 }
1008
1009
1010 #undef OPC_STORE_n
1011 #define OPC_STORE_n(num) \
1012 CASE(_astore_##num): \
1013 astore(topOfStack, -1, locals, num); \
1014 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1015 CASE(_istore_##num): \
1016 CASE(_fstore_##num): \
1017 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
1018 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1019
1020 OPC_STORE_n(0);
1021 OPC_STORE_n(1);
1022 OPC_STORE_n(2);
1023 OPC_STORE_n(3);
1024
1025 #undef OPC_DSTORE_n
1026 #define OPC_DSTORE_n(num) \
1027 CASE(_dstore_##num): \
1028 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
1029 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1030 CASE(_lstore_##num): \
1031 SET_LOCALS_LONG(STACK_LONG(-1), num); \
1032 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1033
1034 OPC_DSTORE_n(0);
1035 OPC_DSTORE_n(1);
1036 OPC_DSTORE_n(2);
1037 OPC_DSTORE_n(3);
1038
1039 /* stack pop, dup, and insert opcodes */
1040
1041
1042 CASE(_pop): /* Discard the top item on the stack */
1043 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1044
1045
1046 CASE(_pop2): /* Discard the top 2 items on the stack */
1047 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1048
1049
1050 CASE(_dup): /* Duplicate the top item on the stack */
1051 dup(topOfStack);
1052 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1053
1054 CASE(_dup2): /* Duplicate the top 2 items on the stack */
1055 dup2(topOfStack);
1056 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1057
1058 CASE(_dup_x1): /* insert top word two down */
1059 dup_x1(topOfStack);
1060 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1061
1062 CASE(_dup_x2): /* insert top word three down */
1063 dup_x2(topOfStack);
1064 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1065
1066 CASE(_dup2_x1): /* insert top 2 slots three down */
1067 dup2_x1(topOfStack);
1068 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1069
1070 CASE(_dup2_x2): /* insert top 2 slots four down */
1071 dup2_x2(topOfStack);
1072 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1073
1074 CASE(_swap): { /* swap top two elements on the stack */
1075 swap(topOfStack);
1076 UPDATE_PC_AND_CONTINUE(1);
1077 }
1078
1079 /* Perform various binary integer operations */
1080
1081 #undef OPC_INT_BINARY
1082 #define OPC_INT_BINARY(opcname, opname, test) \
1083 CASE(_i##opcname): \
1084 if (test && (STACK_INT(-1) == 0)) { \
1085 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1086 "/ by zero"); \
1087 } \
1088 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1089 STACK_INT(-1)), \
1090 -2); \
1091 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1092 CASE(_l##opcname): \
1093 { \
1094 if (test) { \
1095 jlong l1 = STACK_LONG(-1); \
1096 if (VMlongEqz(l1)) { \
1097 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1098 "/ by long zero"); \
1099 } \
1100 } \
1101 /* First long at (-1,-2) next long at (-3,-4) */ \
1102 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
1103 STACK_LONG(-1)), \
1104 -3); \
1105 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1106 }
1107
1108 OPC_INT_BINARY(add, Add, 0);
1109 OPC_INT_BINARY(sub, Sub, 0);
1110 OPC_INT_BINARY(mul, Mul, 0);
1111 OPC_INT_BINARY(and, And, 0);
1112 OPC_INT_BINARY(or, Or, 0);
1113 OPC_INT_BINARY(xor, Xor, 0);
1114 OPC_INT_BINARY(div, Div, 1);
1115 OPC_INT_BINARY(rem, Rem, 1);
1116
1117
1118 /* Perform various binary floating number operations */
1119 /* On some machine/platforms/compilers div zero check can be implicit */
1120
1121 #undef OPC_FLOAT_BINARY
1122 #define OPC_FLOAT_BINARY(opcname, opname) \
1123 CASE(_d##opcname): { \
1124 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
1125 STACK_DOUBLE(-1)), \
1126 -3); \
1127 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1128 } \
1129 CASE(_f##opcname): \
1130 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
1131 STACK_FLOAT(-1)), \
1132 -2); \
1133 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1134
1135
1136 OPC_FLOAT_BINARY(add, Add);
1137 OPC_FLOAT_BINARY(sub, Sub);
1138 OPC_FLOAT_BINARY(mul, Mul);
1139 OPC_FLOAT_BINARY(div, Div);
1140 OPC_FLOAT_BINARY(rem, Rem);
1141
1142 /* Shift operations
1143 * Shift left int and long: ishl, lshl
1144 * Logical shift right int and long w/zero extension: iushr, lushr
1145 * Arithmetic shift right int and long w/sign extension: ishr, lshr
1146 */
1147
1148 #undef OPC_SHIFT_BINARY
1149 #define OPC_SHIFT_BINARY(opcname, opname) \
1150 CASE(_i##opcname): \
1151 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1152 STACK_INT(-1)), \
1153 -2); \
1154 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1155 CASE(_l##opcname): \
1156 { \
1157 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
1158 STACK_INT(-1)), \
1159 -2); \
1160 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1161 }
1162
1163 OPC_SHIFT_BINARY(shl, Shl);
1164 OPC_SHIFT_BINARY(shr, Shr);
1165 OPC_SHIFT_BINARY(ushr, Ushr);
1166
1167 /* Increment local variable by constant */
1168 CASE(_iinc):
1169 {
1170 // locals[pc[1]].j.i += (jbyte)(pc[2]);
1171 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
1172 UPDATE_PC_AND_CONTINUE(3);
1173 }
1174
1175 /* negate the value on the top of the stack */
1176
1177 CASE(_ineg):
1178 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
1179 UPDATE_PC_AND_CONTINUE(1);
1180
1181 CASE(_fneg):
1182 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
1183 UPDATE_PC_AND_CONTINUE(1);
1184
1185 CASE(_lneg):
1186 {
1187 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
1188 UPDATE_PC_AND_CONTINUE(1);
1189 }
1190
1191 CASE(_dneg):
1192 {
1193 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
1194 UPDATE_PC_AND_CONTINUE(1);
1195 }
1196
1197 /* Conversion operations */
1198
1199 CASE(_i2f): /* convert top of stack int to float */
1200 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
1201 UPDATE_PC_AND_CONTINUE(1);
1202
1203 CASE(_i2l): /* convert top of stack int to long */
1204 {
1205 // this is ugly QQQ
1206 jlong r = VMint2Long(STACK_INT(-1));
1207 MORE_STACK(-1); // Pop
1208 SET_STACK_LONG(r, 1);
1209
1210 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1211 }
1212
1213 CASE(_i2d): /* convert top of stack int to double */
1214 {
1215 // this is ugly QQQ (why cast to jlong?? )
1216 jdouble r = (jlong)STACK_INT(-1);
1217 MORE_STACK(-1); // Pop
1218 SET_STACK_DOUBLE(r, 1);
1219
1220 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1221 }
1222
1223 CASE(_l2i): /* convert top of stack long to int */
1224 {
1225 jint r = VMlong2Int(STACK_LONG(-1));
1226 MORE_STACK(-2); // Pop
1227 SET_STACK_INT(r, 0);
1228 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1229 }
1230
1231 CASE(_l2f): /* convert top of stack long to float */
1232 {
1233 jlong r = STACK_LONG(-1);
1234 MORE_STACK(-2); // Pop
1235 SET_STACK_FLOAT(VMlong2Float(r), 0);
1236 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1237 }
1238
1239 CASE(_l2d): /* convert top of stack long to double */
1240 {
1241 jlong r = STACK_LONG(-1);
1242 MORE_STACK(-2); // Pop
1243 SET_STACK_DOUBLE(VMlong2Double(r), 1);
1244 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1245 }
1246
1247 CASE(_f2i): /* Convert top of stack float to int */
1248 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
1249 UPDATE_PC_AND_CONTINUE(1);
1250
1251 CASE(_f2l): /* convert top of stack float to long */
1252 {
1253 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
1254 MORE_STACK(-1); // POP
1255 SET_STACK_LONG(r, 1);
1256 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1257 }
1258
1259 CASE(_f2d): /* convert top of stack float to double */
1260 {
1261 jfloat f;
1262 jdouble r;
1263 f = STACK_FLOAT(-1);
1264 r = (jdouble) f;
1265 MORE_STACK(-1); // POP
1266 SET_STACK_DOUBLE(r, 1);
1267 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1268 }
1269
1270 CASE(_d2i): /* convert top of stack double to int */
1271 {
1272 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
1273 MORE_STACK(-2);
1274 SET_STACK_INT(r1, 0);
1275 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1276 }
1277
1278 CASE(_d2f): /* convert top of stack double to float */
1279 {
1280 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
1281 MORE_STACK(-2);
1282 SET_STACK_FLOAT(r1, 0);
1283 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1284 }
1285
1286 CASE(_d2l): /* convert top of stack double to long */
1287 {
1288 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
1289 MORE_STACK(-2);
1290 SET_STACK_LONG(r1, 1);
1291 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1292 }
1293
1294 CASE(_i2b):
1295 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
1296 UPDATE_PC_AND_CONTINUE(1);
1297
1298 CASE(_i2c):
1299 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
1300 UPDATE_PC_AND_CONTINUE(1);
1301
1302 CASE(_i2s):
1303 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
1304 UPDATE_PC_AND_CONTINUE(1);
1305
1306 /* comparison operators */
1307
1308
1309 #define COMPARISON_OP(name, comparison) \
1310 CASE(_if_icmp##name): { \
1311 int skip = (STACK_INT(-2) comparison STACK_INT(-1)) \
1312 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1313 address branch_pc = pc; \
1314 UPDATE_PC_AND_TOS(skip, -2); \
1315 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1316 CONTINUE; \
1317 } \
1318 CASE(_if##name): { \
1319 int skip = (STACK_INT(-1) comparison 0) \
1320 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1321 address branch_pc = pc; \
1322 UPDATE_PC_AND_TOS(skip, -1); \
1323 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1324 CONTINUE; \
1325 }
1326
1327 #define COMPARISON_OP2(name, comparison) \
1328 COMPARISON_OP(name, comparison) \
1329 CASE(_if_acmp##name): { \
1330 int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \
1331 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1332 address branch_pc = pc; \
1333 UPDATE_PC_AND_TOS(skip, -2); \
1334 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1335 CONTINUE; \
1336 }
1337
1338 #define NULL_COMPARISON_NOT_OP(name) \
1339 CASE(_if##name): { \
1340 int skip = (!(STACK_OBJECT(-1) == nullptr)) \
1341 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1342 address branch_pc = pc; \
1343 UPDATE_PC_AND_TOS(skip, -1); \
1344 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1345 CONTINUE; \
1346 }
1347
1348 #define NULL_COMPARISON_OP(name) \
1349 CASE(_if##name): { \
1350 int skip = ((STACK_OBJECT(-1) == nullptr)) \
1351 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1352 address branch_pc = pc; \
1353 UPDATE_PC_AND_TOS(skip, -1); \
1354 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1355 CONTINUE; \
1356 }
1357 COMPARISON_OP(lt, <);
1358 COMPARISON_OP(gt, >);
1359 COMPARISON_OP(le, <=);
1360 COMPARISON_OP(ge, >=);
1361 COMPARISON_OP2(eq, ==); /* include ref comparison */
1362 COMPARISON_OP2(ne, !=); /* include ref comparison */
1363 NULL_COMPARISON_OP(null);
1364 NULL_COMPARISON_NOT_OP(nonnull);
1365
1366 /* Goto pc at specified offset in switch table. */
1367
      CASE(_tableswitch): {
        // Operands are 4-byte aligned: [default, low, high, jump table...].
        jint* lpc = (jint*)VMalignWordUp(pc+1);
        int32_t key = STACK_INT(-1);
        int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
        int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
        int32_t skip;
        key -= low;
        // One unsigned comparison covers both key < low and key > high.
        if (((uint32_t) key > (uint32_t)(high - low))) {
          skip = Bytes::get_Java_u4((address)&lpc[0]); // default offset
        } else {
          skip = Bytes::get_Java_u4((address)&lpc[key + 3]); // table entry
        }
        // Does this really need a full backedge check (osr)?
        address branch_pc = pc;
        UPDATE_PC_AND_TOS(skip, -1);
        DO_BACKEDGE_CHECKS(skip, branch_pc);
        CONTINUE;
      }
1386
1387 /* Goto pc whose table entry matches specified key. */
1388
      CASE(_lookupswitch): {
        // Operands are 4-byte aligned: [default, npairs, (match, offset)...].
        jint* lpc = (jint*)VMalignWordUp(pc+1);
        int32_t key = STACK_INT(-1);
        int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
        int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
        // Linear scan over the match/offset pairs; falls back to the
        // pre-loaded default offset if no pair matches.
        while (--npairs >= 0) {
          lpc += 2;
          if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
            skip = Bytes::get_Java_u4((address)&lpc[1]);
            break;
          }
        }
        address branch_pc = pc;
        UPDATE_PC_AND_TOS(skip, -1);
        DO_BACKEDGE_CHECKS(skip, branch_pc);
        CONTINUE;
      }
1406
1407 CASE(_fcmpl):
1408 CASE(_fcmpg):
1409 {
1410 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
1411 STACK_FLOAT(-1),
1412 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
1413 -2);
1414 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1415 }
1416
1417 CASE(_dcmpl):
1418 CASE(_dcmpg):
1419 {
1420 int r = VMdoubleCompare(STACK_DOUBLE(-3),
1421 STACK_DOUBLE(-1),
1422 (opcode == Bytecodes::_dcmpl ? -1 : 1));
1423 MORE_STACK(-4); // Pop
1424 SET_STACK_INT(r, 0);
1425 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1426 }
1427
1428 CASE(_lcmp):
1429 {
1430 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
1431 MORE_STACK(-4);
1432 SET_STACK_INT(r, 0);
1433 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1434 }
1435
1436
1437 /* Return from a method */
1438
      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      CASE(_lreturn):
      CASE(_dreturn):
      CASE(_return): {
        // Allow a safepoint before returning to frame manager.
        RETURN_SAFEPOINT;
        goto handle_return;
      }

      CASE(_return_register_finalizer): {
        // NOTE(review): this opcode appears to replace _return where the
        // receiver's class declares a finalizer — the receiver (local 0)
        // is registered with the runtime before returning; confirm with the
        // rewriter that emits it.
        oop rcvr = LOCALS_OBJECT(0);
        VERIFY_OOP(rcvr);
        if (rcvr->klass()->has_finalizer()) {
          CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
        }
        goto handle_return;
      }
1458
1459 /* Array access byte-codes */
1460
1461 #define ARRAY_INDEX_CHECK(arrObj, index) \
1462 /* Two integers, the additional message, and the null-terminator */ \
1463 char message[2 * jintAsStringSize + 33]; \
1464 CHECK_NULL(arrObj); \
1465 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
1466 jio_snprintf(message, sizeof(message), \
1467 "Index %d out of bounds for length %d", \
1468 index, arrObj->length()); \
1469 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
1470 message); \
1471 }
1472
1473 /* Every array access byte-code starts out like this */
1474 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
1475 #define ARRAY_INTRO(arrayOff) \
1476 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
1477 jint index = STACK_INT(arrayOff + 1); \
1478 ARRAY_INDEX_CHECK(arrObj, index)
1479
1480 /* 32-bit loads. These handle conversion from < 32-bit types */
1481 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
1482 { \
1483 ARRAY_INTRO(-2); \
1484 (void)extra; \
1485 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
1486 -2); \
1487 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1488 }
1489
1490 /* 64-bit loads */
1491 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
1492 { \
1493 ARRAY_INTRO(-2); \
1494 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
1495 (void)extra; \
1496 UPDATE_PC_AND_CONTINUE(1); \
1497 }
1498
1499 CASE(_iaload):
1500 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
1501 CASE(_faload):
1502 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1503 CASE(_aaload): {
1504 ARRAY_INTRO(-2);
1505 if (((objArrayOop) arrObj)->is_flatArray()) {
1506 CALL_VM(InterpreterRuntime::flat_array_load(THREAD, (objArrayOop) arrObj, index), handle_exception);
1507 SET_STACK_OBJECT(THREAD->vm_result_oop(),-2);
1508 THREAD->set_vm_result_oop(nullptr);
1509 } else {
1510 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
1511 }
1512 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1513 }
1514 CASE(_baload):
1515 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1516 CASE(_caload):
1517 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
1518 CASE(_saload):
1519 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
1520 CASE(_laload):
1521 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
1522 CASE(_daload):
1523 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1524
      CASE(_fast_icaload): {
        // Custom fast access for iload,caload pair.
        // pc[1] is the fused iload's local index; the array reference is
        // already on the stack. Advances 3 bytes (iload 2 + caload 1); the
        // char read replaces the array ref, so net stack change is 0.
        arrayOop arrObj = (arrayOop) STACK_OBJECT(-1);
        jint index = LOCALS_INT(pc[1]);
        ARRAY_INDEX_CHECK(arrObj, index);
        SET_STACK_INT(*(jchar *)(((address) arrObj->base(T_CHAR)) + index * sizeof(jchar)), -1);
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 0);
      }
1533
1534 /* 32-bit stores. These handle conversion to < 32-bit types */
1535 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
1536 { \
1537 ARRAY_INTRO(-3); \
1538 (void)extra; \
1539 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1540 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
1541 }
1542
1543 /* 64-bit stores */
1544 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
1545 { \
1546 ARRAY_INTRO(-4); \
1547 (void)extra; \
1548 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1549 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
1550 }
1551
1552 CASE(_iastore):
1553 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
1554 CASE(_fastore):
1555 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1556 /*
1557 * This one looks different because of the assignability check
1558 */
      CASE(_aastore): {
        // Stack layout: ..., arrayref (-3), index (-2), value (-1).
        oop rhsObject = STACK_OBJECT(-1);
        VERIFY_OOP(rhsObject);
        ARRAY_INTRO( -3);
        // arrObj, index are set
        if (rhsObject != nullptr) {
          /* Check assignability of rhsObject into arrObj */
          Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
          Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
          //
          // Check for compatibility. This check must not GC!!
          // Seems way more expensive now that we must dispatch
          //
          if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
            VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
          }
        } else if (arrObj->is_null_free_array()) {
          // Null-restricted (flat/value-class) arrays reject null stores.
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "Cannot store null in a null-restricted array");
        }
        ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore): {
        // bastore serves both byte[] and boolean[] arrays.
        ARRAY_INTRO(-3);
        int item = STACK_INT(-1);
        // if it is a T_BOOLEAN array, mask the stored value to 0/1
        if (arrObj->klass() == Universe::boolArrayKlass()) {
          item &= 1;
        } else {
          assert(arrObj->klass() == Universe::byteArrayKlass(),
                 "should be byte array otherwise");
        }
        ((typeArrayOop)arrObj)->byte_at_put(index, item);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
1594 CASE(_castore):
1595 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
1596 CASE(_sastore):
1597 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
1598 CASE(_lastore):
1599 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
1600 CASE(_dastore):
1601 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1602
      CASE(_arraylength):
      {
        arrayOop ary = (arrayOop) STACK_OBJECT(-1);
        // Explicit null check, then replace the array ref with its length.
        CHECK_NULL(ary);
        SET_STACK_INT(ary->length(), -1);
        UPDATE_PC_AND_CONTINUE(1);
      }
1610
1611 /* monitorenter and monitorexit for locking/unlocking an object */
1612
      CASE(_monitorenter): {
        oop lockee = STACK_OBJECT(-1);
        // derefing's lockee ought to provoke implicit null check
        CHECK_NULL(lockee);
        // find a free monitor or one already allocated for this object
        // if we find a matching object then we need a new monitor
        // since this is recursive enter
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        BasicObjectLock* entry = nullptr;
        // Scan [stack_base, monitor_base): remember the last free slot seen,
        // but stop at a slot already holding lockee (recursive enter still
        // consumes a fresh slot, which `entry` already points at).
        while (most_recent != limit ) {
          if (most_recent->obj() == nullptr) entry = most_recent;
          else if (most_recent->obj() == lockee) break;
          most_recent++;
        }
        if (entry != nullptr) {
          entry->set_obj(lockee);
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
        } else {
          // No free slot: ask the frame manager to grow the monitor area,
          // then re-execute this same monitorenter.
          istate->set_msg(more_monitors);
          UPDATE_PC_AND_RETURN(0); // Re-execute
        }
      }
1637
1638 CASE(_monitorexit): {
1639 oop lockee = STACK_OBJECT(-1);
1640 CHECK_NULL(lockee);
1641 // derefing's lockee ought to provoke implicit null check
1642 // find our monitor slot
1643 BasicObjectLock* limit = istate->monitor_base();
1644 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1645 while (most_recent != limit ) {
1646 if ((most_recent)->obj() == lockee) {
1647 BasicLock* lock = most_recent->lock();
1648 InterpreterRuntime::monitorexit(most_recent);
1649 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1650 }
1651 most_recent++;
1652 }
1653 // Need to throw illegal monitor state exception
1654 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1655 ShouldNotReachHere();
1656 }
1657
1658 /* All of the non-quick opcodes. */
1659
1660 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1661 * constant pool index in the instruction.
1662 */
1663 CASE(_getfield):
1664 CASE(_nofast_getfield):
1665 CASE(_getstatic):
1666 {
1667 u2 index;
1668 index = Bytes::get_native_u2(pc+1);
1669 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
1670
1671 // QQQ Need to make this as inlined as possible. Probably need to
1672 // split all the bytecode cases out so c++ compiler has a chance
1673 // for constant prop to fold everything possible away.
1674
1675 // Interpreter runtime does not expect "nofast" opcodes,
1676 // prepare the vanilla opcode for it.
1677 Bytecodes::Code code = (Bytecodes::Code)opcode;
1678 if (code == Bytecodes::_nofast_getfield) {
1679 code = Bytecodes::_getfield;
1680 }
1681
1682 if (!entry->is_resolved(code)) {
1683 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
1684 handle_exception);
1685 entry = cp->resolved_field_entry_at(index);
1686 }
1687
1688 oop obj;
1689 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1690 Klass* k = entry->field_holder();
1691 obj = k->java_mirror();
1692 MORE_STACK(1); // Assume single slot push
1693 } else {
1694 obj = STACK_OBJECT(-1);
1695 CHECK_NULL(obj);
1696 // Check if we can rewrite non-volatile _getfield to one of the _fast_Xgetfield.
1697 if (REWRITE_BYTECODES && !entry->is_volatile() &&
1698 ((Bytecodes::Code)opcode != Bytecodes::_nofast_getfield)) {
1699 // Rewrite current BC to _fast_Xgetfield.
1700 REWRITE_AT_PC(fast_get_type((TosState)(entry->tos_state())));
1701 }
1702 }
1703
1704 MAYBE_POST_FIELD_ACCESS(obj);
1705
1706 //
1707 // Now store the result on the stack
1708 //
1709 TosState tos_type = (TosState)(entry->tos_state());
1710 int field_offset = entry->field_offset();
1711 if (entry->is_volatile()) {
1712 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
1713 OrderAccess::fence();
1714 }
1715 switch (tos_type) {
1716 case btos:
1717 case ztos:
1718 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
1719 break;
1720 case ctos:
1721 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
1722 break;
1723 case stos:
1724 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
1725 break;
1726 case itos:
1727 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
1728 break;
1729 case ftos:
1730 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
1731 break;
1732 case ltos:
1733 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
1734 MORE_STACK(1);
1735 break;
1736 case dtos:
1737 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
1738 MORE_STACK(1);
1739 break;
1740 case atos: {
1741 assert(!entry->is_flat(), "Flat volatile field not supported");
1742 oop val = obj->obj_field_acquire(field_offset);
1743 VERIFY_OOP(val);
1744 SET_STACK_OBJECT(val, -1);
1745 break;
1746 }
1747 default:
1748 ShouldNotReachHere();
1749 }
1750 } else {
1751 switch (tos_type) {
1752 case btos:
1753 case ztos:
1754 SET_STACK_INT(obj->byte_field(field_offset), -1);
1755 break;
1756 case ctos:
1757 SET_STACK_INT(obj->char_field(field_offset), -1);
1758 break;
1759 case stos:
1760 SET_STACK_INT(obj->short_field(field_offset), -1);
1761 break;
1762 case itos:
1763 SET_STACK_INT(obj->int_field(field_offset), -1);
1764 break;
1765 case ftos:
1766 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
1767 break;
1768 case ltos:
1769 SET_STACK_LONG(obj->long_field(field_offset), 0);
1770 MORE_STACK(1);
1771 break;
1772 case dtos:
1773 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
1774 MORE_STACK(1);
1775 break;
1776 case atos: {
1777 oop val;
1778 if (entry->is_flat()) {
1779 CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
1780 val = THREAD->vm_result_oop();
1781 THREAD->set_vm_result_oop(nullptr);
1782 } else {
1783 val = obj->obj_field(field_offset);
1784 }
1785 VERIFY_OOP(val);
1786 SET_STACK_OBJECT(val, -1);
1787 break;
1788 }
1789 default:
1790 ShouldNotReachHere();
1791 }
1792 }
1793
1794 UPDATE_PC_AND_CONTINUE(3);
1795 }
1796
1797 CASE(_putfield):
1798 CASE(_nofast_putfield):
1799 CASE(_putstatic):
1800 {
1801 u2 index = Bytes::get_native_u2(pc+1);
1802 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
1803
1804 // Interpreter runtime does not expect "nofast" opcodes,
1805 // prepare the vanilla opcode for it.
1806 Bytecodes::Code code = (Bytecodes::Code)opcode;
1807 if (code == Bytecodes::_nofast_putfield) {
1808 code = Bytecodes::_putfield;
1809 }
1810
1811 if (!entry->is_resolved(code)) {
1812 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
1813 handle_exception);
1814 entry = cp->resolved_field_entry_at(index);
1815 }
1816
1817 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
1818 // out so c++ compiler has a chance for constant prop to fold everything possible away.
1819
1820 oop obj;
1821 int count;
1822 TosState tos_type = (TosState)(entry->tos_state());
1823
1824 count = -1;
1825 if (tos_type == ltos || tos_type == dtos) {
1826 --count;
1827 }
1828 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
1829 Klass* k = entry->field_holder();
1830 obj = k->java_mirror();
1831 } else {
1832 --count;
1833 obj = STACK_OBJECT(count);
1834 CHECK_NULL(obj);
1835
1836 // Check if we can rewrite non-volatile _putfield to one of the _fast_Xputfield.
1837 if (REWRITE_BYTECODES && !entry->is_volatile() &&
1838 ((Bytecodes::Code)opcode != Bytecodes::_nofast_putfield)) {
1839 // Rewrite current BC to _fast_Xputfield.
1840 REWRITE_AT_PC(fast_put_type((TosState)(entry->tos_state())));
1841 }
1842 }
1843
1844 MAYBE_POST_FIELD_MODIFICATION(obj);
1845
1846 //
1847 // Now store the result
1848 //
1849 int field_offset = entry->field_offset();
1850 if (entry->is_volatile()) {
1851 switch (tos_type) {
1852 case ztos:
1853 obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
1854 break;
1855 case btos:
1856 obj->release_byte_field_put(field_offset, STACK_INT(-1));
1857 break;
1858 case ctos:
1859 obj->release_char_field_put(field_offset, STACK_INT(-1));
1860 break;
1861 case stos:
1862 obj->release_short_field_put(field_offset, STACK_INT(-1));
1863 break;
1864 case itos:
1865 obj->release_int_field_put(field_offset, STACK_INT(-1));
1866 break;
1867 case ftos:
1868 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
1869 break;
1870 case ltos:
1871 obj->release_long_field_put(field_offset, STACK_LONG(-1));
1872 break;
1873 case dtos:
1874 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
1875 break;
1876 case atos: {
1877 assert(!entry->is_flat(), "Flat volatile field not supported");
1878 oop val = STACK_OBJECT(-1);
1879 VERIFY_OOP(val);
1880 obj->release_obj_field_put(field_offset, val);
1881 break;
1882 }
1883 default:
1884 ShouldNotReachHere();
1885 }
1886 OrderAccess::storeload();
1887 } else {
1888 switch (tos_type) {
1889 case ztos:
1890 obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
1891 break;
1892 case btos:
1893 obj->byte_field_put(field_offset, STACK_INT(-1));
1894 break;
1895 case ctos:
1896 obj->char_field_put(field_offset, STACK_INT(-1));
1897 break;
1898 case stos:
1899 obj->short_field_put(field_offset, STACK_INT(-1));
1900 break;
1901 case itos:
1902 obj->int_field_put(field_offset, STACK_INT(-1));
1903 break;
1904 case ftos:
1905 obj->float_field_put(field_offset, STACK_FLOAT(-1));
1906 break;
1907 case ltos:
1908 obj->long_field_put(field_offset, STACK_LONG(-1));
1909 break;
1910 case dtos:
1911 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
1912 break;
1913 case atos: {
1914 oop val = STACK_OBJECT(-1);
1915 VERIFY_OOP(val);
1916 if (entry->is_flat()) {
1917 CALL_VM(InterpreterRuntime::write_flat_field(THREAD, obj, val, entry), handle_exception);
1918 } else {
1919 obj->obj_field_put(field_offset, val);
1920 }
1921 break;
1922 }
1923 default:
1924 ShouldNotReachHere();
1925 }
1926 }
1927
1928 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
1929 }
1930
      CASE(_new): {
        u2 index = Bytes::get_Java_u2(pc+1);

        // Attempt TLAB allocation first.
        //
        // To do this, we need to make sure:
        //  - klass is initialized
        //  - klass can be fastpath allocated (e.g. does not have finalizer)
        //  - TLAB accepts the allocation
        ConstantPool* constants = istate->method()->constants();
        if (UseTLAB && !constants->tag_at(index).is_unresolved_klass()) {
          Klass* entry = constants->resolved_klass_at(index);
          InstanceKlass* ik = InstanceKlass::cast(entry);
          if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
            size_t obj_size = ik->size_helper();
            HeapWord* result = THREAD->tlab().allocate(obj_size);
            if (result != nullptr) {
              // Initialize object field block.
              if (!ZeroTLAB) {
                // The TLAB was not pre-zeroed, we need to clear the memory here.
                size_t hdr_size = oopDesc::header_size();
                Copy::fill_to_words(result + hdr_size, obj_size - hdr_size, 0);
              }

              // Initialize header, mirrors MemAllocator.
              if (UseCompactObjectHeaders) {
                // Compact headers: the klass is encoded in the mark word.
                oopDesc::release_set_mark(result, ik->prototype_header());
              } else {
                oopDesc::set_mark(result, markWord::prototype());
                if (oopDesc::has_klass_gap()) {
                  oopDesc::set_klass_gap(result, 0);
                }
                oopDesc::release_set_klass(result, ik);
              }
              oop obj = cast_to_oop(result);

              // Must prevent reordering of stores for object initialization
              // with stores that publish the new object.
              OrderAccess::storestore();
              SET_STACK_OBJECT(obj, 0);
              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
            }
          }
        }
        // Slow case allocation: unresolved klass, finalizer, TLAB full, etc.
        CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
      }
      CASE(_anewarray): {
        // Allocate an object array; element klass is CP entry 'index',
        // length is popped from the top of stack.
        u2 index = Bytes::get_Java_u2(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // The array reference replaces the length in the same stack slot.
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_CONTINUE(3);
      }
      CASE(_multianewarray): {
        // Number of dimensions is the unsigned byte operand at pc+3;
        // the topmost 'dims' stack slots hold the dimension sizes.
        jint dims = *(pc+3);
        // NOTE: 'size' is not used below — the runtime reads all dimension
        // sizes (including the outermost one) through 'dimarray'.
        jint size = STACK_INT(-1);
        // stack grows down, dimensions are up!
        jint *dimarray =
                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
                                      Interpreter::stackElementWords-1];
        //adjust pointer to start of stack element
        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Store the result into the slot of the first (deepest) dimension ...
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -dims);
        THREAD->set_vm_result_oop(nullptr);
        // ... then pop the 'dims' sizes and push the array: net TOS change
        // is -(dims-1).
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
      }
      CASE(_checkcast):
          // checkcast: a null reference always passes; otherwise the object's
          // klass must equal, or be a subtype of, the klass named by the CP entry.
          if (STACK_OBJECT(-1) != nullptr) {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
            Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
              // Failed cast: build the detail message (resource-allocated)
              // and raise ClassCastException.
              ResourceMark rm(THREAD);
              char* message = SharedRuntime::generate_class_cast_message(
                objKlass, klassOf);
              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
            }
          }
          // Operand stack is unchanged on success.
          UPDATE_PC_AND_CONTINUE(3);
2037
      CASE(_instanceof):
          // instanceof: null yields 0; otherwise 1 iff the object's klass
          // equals, or is a subtype of, the klass named by the CP entry.
          if (STACK_OBJECT(-1) == nullptr) {
            SET_STACK_INT(0, -1);
          } else {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
            Klass* objKlass = STACK_OBJECT(-1)->klass();
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
              SET_STACK_INT(1, -1);
            } else {
              SET_STACK_INT(0, -1);
            }
          }
          // The int result overwrote the reference slot; TOS depth unchanged.
          UPDATE_PC_AND_CONTINUE(3);
2062
2063 CASE(_ldc_w):
2064 CASE(_ldc):
2065 {
2066 u2 index;
2067 bool wide = false;
2068 int incr = 2; // frequent case
2069 if (opcode == Bytecodes::_ldc) {
2070 index = pc[1];
2071 } else {
2072 index = Bytes::get_Java_u2(pc+1);
2073 incr = 3;
2074 wide = true;
2075 }
2076
2077 ConstantPool* constants = METHOD->constants();
2078 switch (constants->tag_at(index).value()) {
2079 case JVM_CONSTANT_Integer:
2080 SET_STACK_INT(constants->int_at(index), 0);
2081 break;
2082
2083 case JVM_CONSTANT_Float:
2084 SET_STACK_FLOAT(constants->float_at(index), 0);
2085 break;
2086
2087 case JVM_CONSTANT_String:
2088 {
2089 oop result = constants->resolved_reference_at(index);
2090 if (result == nullptr) {
2091 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2092 SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
2093 THREAD->set_vm_result_oop(nullptr);
2094 } else {
2095 VERIFY_OOP(result);
2096 SET_STACK_OBJECT(result, 0);
2097 }
2098 break;
2099 }
2100
2101 case JVM_CONSTANT_Class:
2102 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
2103 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
2104 break;
2105
2106 case JVM_CONSTANT_UnresolvedClass:
2107 case JVM_CONSTANT_UnresolvedClassInError:
2108 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
2109 SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
2110 THREAD->set_vm_result_oop(nullptr);
2111 break;
2112
2113 case JVM_CONSTANT_Dynamic:
2114 case JVM_CONSTANT_DynamicInError:
2115 {
2116 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2117 oop result = THREAD->vm_result_oop();
2118 VERIFY_OOP(result);
2119
2120 jvalue value;
2121 BasicType type = java_lang_boxing_object::get_value(result, &value);
2122 switch (type) {
2123 case T_FLOAT: SET_STACK_FLOAT(value.f, 0); break;
2124 case T_INT: SET_STACK_INT(value.i, 0); break;
2125 case T_SHORT: SET_STACK_INT(value.s, 0); break;
2126 case T_BYTE: SET_STACK_INT(value.b, 0); break;
2127 case T_CHAR: SET_STACK_INT(value.c, 0); break;
2128 case T_BOOLEAN: SET_STACK_INT(value.z, 0); break;
2129 default: ShouldNotReachHere();
2130 }
2131
2132 break;
2133 }
2134
2135 default: ShouldNotReachHere();
2136 }
2137 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2138 }
2139
2140 CASE(_ldc2_w):
2141 {
2142 u2 index = Bytes::get_Java_u2(pc+1);
2143
2144 ConstantPool* constants = METHOD->constants();
2145 switch (constants->tag_at(index).value()) {
2146
2147 case JVM_CONSTANT_Long:
2148 SET_STACK_LONG(constants->long_at(index), 1);
2149 break;
2150
2151 case JVM_CONSTANT_Double:
2152 SET_STACK_DOUBLE(constants->double_at(index), 1);
2153 break;
2154
2155 case JVM_CONSTANT_Dynamic:
2156 case JVM_CONSTANT_DynamicInError:
2157 {
2158 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2159 oop result = THREAD->vm_result_oop();
2160 VERIFY_OOP(result);
2161
2162 jvalue value;
2163 BasicType type = java_lang_boxing_object::get_value(result, &value);
2164 switch (type) {
2165 case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break;
2166 case T_LONG: SET_STACK_LONG(value.j, 1); break;
2167 default: ShouldNotReachHere();
2168 }
2169
2170 break;
2171 }
2172
2173 default: ShouldNotReachHere();
2174 }
2175 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
2176 }
2177
      CASE(_fast_aldc_w):
      CASE(_fast_aldc): {
        // Rewritten ldc/ldc_w for reference constants resolved through the
        // resolved_references array. The narrow form takes a 1-byte index,
        // the wide form a native-order 2-byte index.
        u2 index;
        int incr;
        if (opcode == Bytecodes::_fast_aldc) {
          index = pc[1];
          incr = 2;
        } else {
          index = Bytes::get_native_u2(pc+1);
          incr = 3;
        }

        // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
        // This kind of CP cache entry does not need to match the flags byte, because
        // there is a 1-1 relation between bytecode type and CP entry type.
        ConstantPool* constants = METHOD->constants();
        oop result = constants->resolved_reference_at(index);
        if (result == nullptr) {
          CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
                  handle_exception);
          result = THREAD->vm_result_oop();
        }
        // A constant that resolved to null is cached as the sentinel so that a
        // null slot can still mean "unresolved"; convert it back to null here.
        if (result == Universe::the_null_sentinel())
          result = nullptr;

        VERIFY_OOP(result);
        SET_STACK_OBJECT(result, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
2207
2208 CASE(_invokedynamic): {
2209 u4 index = Bytes::get_native_u4(pc+1);
2210 ResolvedIndyEntry* indy_info = cp->resolved_indy_entry_at(index);
2211 if (!indy_info->is_resolved()) {
2212 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2213 handle_exception);
2214 indy_info = cp->resolved_indy_entry_at(index); // get resolved entry
2215 }
2216 Method* method = indy_info->method();
2217 if (VerifyOops) method->verify();
2218
2219 if (indy_info->has_appendix()) {
2220 constantPoolHandle cp(THREAD, METHOD->constants());
2221 SET_STACK_OBJECT(cp->resolved_reference_from_indy(index), 0);
2222 MORE_STACK(1);
2223 }
2224
2225 istate->set_msg(call_method);
2226 istate->set_callee(method);
2227 istate->set_callee_entry_point(method->from_interpreted_entry());
2228 istate->set_bcp_advance(5);
2229
2230 UPDATE_PC_AND_RETURN(0); // I'll be back...
2231 }
2232
2233 CASE(_invokehandle): {
2234
2235 u2 index = Bytes::get_native_u2(pc+1);
2236 ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
2237
2238 if (! entry->is_resolved((Bytecodes::Code) opcode)) {
2239 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2240 handle_exception);
2241 entry = cp->resolved_method_entry_at(index);
2242 }
2243
2244 Method* method = entry->method();
2245 if (VerifyOops) method->verify();
2246
2247 if (entry->has_appendix()) {
2248 constantPoolHandle cp(THREAD, METHOD->constants());
2249 SET_STACK_OBJECT(cp->cache()->appendix_if_resolved(entry), 0);
2250 MORE_STACK(1);
2251 }
2252
2253 istate->set_msg(call_method);
2254 istate->set_callee(method);
2255 istate->set_callee_entry_point(method->from_interpreted_entry());
2256 istate->set_bcp_advance(3);
2257
2258 UPDATE_PC_AND_RETURN(0); // I'll be back...
2259 }
2260
2261 CASE(_invokeinterface): {
2262 u2 index = Bytes::get_native_u2(pc+1);
2263
2264 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2265 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2266
2267 ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
2268 if (!entry->is_resolved((Bytecodes::Code)opcode)) {
2269 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2270 handle_exception);
2271 }
2272
2273 istate->set_msg(call_method);
2274
2275 // Special case of invokeinterface called for virtual method of
2276 // java.lang.Object. See cpCache.cpp for details.
2277 Method* callee = nullptr;
2278 if (entry->is_forced_virtual()) {
2279 CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
2280 if (entry->is_vfinal()) {
2281 callee = entry->method();
2282 } else {
2283 // Get receiver.
2284 int parms = entry->number_of_parameters();
2285 // Same comments as invokevirtual apply here.
2286 oop rcvr = STACK_OBJECT(-parms);
2287 VERIFY_OOP(rcvr);
2288 Klass* rcvrKlass = rcvr->klass();
2289 callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
2290 }
2291 } else if (entry->is_vfinal()) {
2292 // private interface method invocations
2293 //
2294 // Ensure receiver class actually implements
2295 // the resolved interface class. The link resolver
2296 // does this, but only for the first time this
2297 // interface is being called.
2298 int parms = entry->number_of_parameters();
2299 oop rcvr = STACK_OBJECT(-parms);
2300 CHECK_NULL(rcvr);
2301 Klass* recv_klass = rcvr->klass();
2302 Klass* resolved_klass = entry->interface_klass();
2303 if (!recv_klass->is_subtype_of(resolved_klass)) {
2304 ResourceMark rm(THREAD);
2305 char buf[200];
2306 jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
2307 recv_klass->external_name(),
2308 resolved_klass->external_name());
2309 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
2310 }
2311 callee = entry->method();
2312 }
2313 if (callee != nullptr) {
2314 istate->set_callee(callee);
2315 istate->set_callee_entry_point(callee->from_interpreted_entry());
2316 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
2317 istate->set_callee_entry_point(callee->interpreter_entry());
2318 }
2319 istate->set_bcp_advance(5);
2320 UPDATE_PC_AND_RETURN(0); // I'll be back...
2321 }
2322
2323 // this could definitely be cleaned up QQQ
2324 Method *interface_method = entry->method();
2325 InstanceKlass* iclass = interface_method->method_holder();
2326
2327 // get receiver
2328 int parms = entry->number_of_parameters();
2329 oop rcvr = STACK_OBJECT(-parms);
2330 CHECK_NULL(rcvr);
2331 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
2332
2333 // Receiver subtype check against resolved interface klass (REFC).
2334 {
2335 Klass* refc = entry->interface_klass();
2336 itableOffsetEntry* scan;
2337 for (scan = (itableOffsetEntry*) int2->start_of_itable();
2338 scan->interface_klass() != nullptr;
2339 scan++) {
2340 if (scan->interface_klass() == refc) {
2341 break;
2342 }
2343 }
2344 // Check that the entry is non-null. A null entry means
2345 // that the receiver class doesn't implement the
2346 // interface, and wasn't the same as when the caller was
2347 // compiled.
2348 if (scan->interface_klass() == nullptr) {
2349 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
2350 }
2351 }
2352
2353 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
2354 int i;
2355 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
2356 if (ki->interface_klass() == iclass) break;
2357 }
2358 // If the interface isn't found, this class doesn't implement this
2359 // interface. The link resolver checks this but only for the first
2360 // time this interface is called.
2361 if (i == int2->itable_length()) {
2362 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(THREAD, rcvr->klass(), iclass),
2363 handle_exception);
2364 }
2365 int mindex = interface_method->itable_index();
2366
2367 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
2368 callee = im[mindex].method();
2369 if (callee == nullptr) {
2370 CALL_VM(InterpreterRuntime::throw_AbstractMethodErrorVerbose(THREAD, rcvr->klass(), interface_method),
2371 handle_exception);
2372 }
2373
2374 istate->set_callee(callee);
2375 istate->set_callee_entry_point(callee->from_interpreted_entry());
2376 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
2377 istate->set_callee_entry_point(callee->interpreter_entry());
2378 }
2379 istate->set_bcp_advance(5);
2380 UPDATE_PC_AND_RETURN(0); // I'll be back...
2381 }
2382
2383 CASE(_invokevirtual):
2384 CASE(_invokespecial):
2385 CASE(_invokestatic): {
2386 u2 index = Bytes::get_native_u2(pc+1);
2387
2388 ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
2389 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2390 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2391
2392 if (!entry->is_resolved((Bytecodes::Code)opcode)) {
2393 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2394 handle_exception);
2395 entry = cp->resolved_method_entry_at(index);
2396 }
2397
2398 istate->set_msg(call_method);
2399 {
2400 Method* callee;
2401 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
2402 CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
2403 if (entry->is_vfinal()) {
2404 callee = entry->method();
2405 if (REWRITE_BYTECODES && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_archive()) {
2406 // Rewrite to _fast_invokevfinal.
2407 REWRITE_AT_PC(Bytecodes::_fast_invokevfinal);
2408 }
2409 } else {
2410 // get receiver
2411 int parms = entry->number_of_parameters();
2412 // this works but needs a resourcemark and seems to create a vtable on every call:
2413 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2414 //
2415 // this fails with an assert
2416 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2417 // but this works
2418 oop rcvr = STACK_OBJECT(-parms);
2419 VERIFY_OOP(rcvr);
2420 Klass* rcvrKlass = rcvr->klass();
2421 /*
2422 Executing this code in java.lang.String:
2423 public String(char value[]) {
2424 this.count = value.length;
2425 this.value = (char[])value.clone();
2426 }
2427
2428 a find on rcvr->klass() reports:
2429 {type array char}{type array class}
2430 - klass: {other class}
2431
              but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
2433 because rcvr->klass()->is_instance_klass() == 0
2434 However it seems to have a vtable in the right location. Huh?
2435 Because vtables have the same offset for ArrayKlass and InstanceKlass.
2436 */
2437 callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
2438 }
2439 } else {
2440 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
2441 CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
2442 }
2443 callee = entry->method();
2444 }
2445
2446 istate->set_callee(callee);
2447 istate->set_callee_entry_point(callee->from_interpreted_entry());
2448 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
2449 istate->set_callee_entry_point(callee->interpreter_entry());
2450 }
2451 istate->set_bcp_advance(3);
2452 UPDATE_PC_AND_RETURN(0); // I'll be back...
2453 }
2454 }
2455
2456 /* Allocate memory for a new java object. */
2457
      CASE(_newarray): {
        // Allocate a primitive array; the element type is the BasicType code
        // in the unsigned byte operand, the length is popped from TOS.
        BasicType atype = (BasicType) *(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // The array reference replaces the length in the same stack slot.
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);

        UPDATE_PC_AND_CONTINUE(2);
      }
2471
2472 /* Throw an exception. */
2473
      CASE(_athrow): {
        // Pop the throwable; CHECK_NULL rejects a null reference before we
        // install it as the pending exception.
        oop except_oop = STACK_OBJECT(-1);
        CHECK_NULL(except_oop);
        // set pending_exception so we use common code
        THREAD->set_pending_exception(except_oop, nullptr, 0);
        goto handle_exception;
      }
2481
2482 /* goto and jsr. They are exactly the same except jsr pushes
2483 * the address of the next instruction first.
2484 */
2485
      CASE(_jsr): {
        /* push bytecode index on stack */
        // The return address is stored as a BCI (pc - code_base + 3, i.e. the
        // instruction after this 3-byte jsr); _ret converts it back to a pc.
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto):
      {
        // Signed 16-bit branch offset, relative to this instruction.
        int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        // Backward-branch bookkeeping (see DO_BACKEDGE_CHECKS) happens after
        // the pc update, using the pre-branch pc.
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }
2501
      CASE(_jsr_w): {
        /* push return address on the stack */
        // Wide form: the return BCI skips this 5-byte jsr_w instruction.
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto_w):
      {
        // Signed 32-bit branch offset, relative to this instruction.
        int32_t offset = Bytes::get_Java_u4(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        // Backward-branch bookkeeping, as for _goto.
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }
2517
2518 /* return from a jsr or jsr_w */
2519
      CASE(_ret): {
        // The local slot (index in the byte operand) holds a bytecode index
        // saved by jsr/jsr_w; convert it back to an absolute pc in this method.
        pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
        UPDATE_PC_AND_CONTINUE(0);
      }
2524
2525 /* debugger breakpoint */
2526
      CASE(_breakpoint): {
        // A debugger replaced the real bytecode with _breakpoint. Look up the
        // original opcode, report the breakpoint to the VM, then re-dispatch
        // on the original opcode as if the breakpoint were not there.
        Bytecodes::Code original_bytecode;
        DECACHE_STATE();
        SET_LAST_JAVA_FRAME();
        original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
                                                                         METHOD, pc);
        RESET_LAST_JAVA_FRAME();
        CACHE_STATE();
        // get_original_bytecode_at was called without CALL_VM, so check for a
        // pending exception explicitly here.
        if (THREAD->has_pending_exception()) goto handle_exception;
        CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
                handle_exception);

        // Re-enter the dispatch switch with the original opcode; pc unchanged.
        opcode = (jubyte)original_bytecode;
        goto opcode_switch;
      }
2542
2543 CASE(_fast_agetfield): {
2544 u2 index = Bytes::get_native_u2(pc+1);
2545 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2546 int field_offset = entry->field_offset();
2547
2548 oop obj = STACK_OBJECT(-1);
2549 CHECK_NULL(obj);
2550
2551 MAYBE_POST_FIELD_ACCESS(obj);
2552
2553 oop val;
2554 if (entry->is_flat()) {
2555 CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
2556 val = THREAD->vm_result_oop();
2557 THREAD->set_vm_result_oop(nullptr);
2558 } else {
2559 val = obj->obj_field(field_offset);
2560 }
2561
2562 VERIFY_OOP(val);
2563 SET_STACK_OBJECT(val, -1);
2564 UPDATE_PC_AND_CONTINUE(3);
2565 }
2566
2567 CASE(_fast_bgetfield): {
2568 u2 index = Bytes::get_native_u2(pc+1);
2569 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2570 int field_offset = entry->field_offset();
2571
2572 oop obj = STACK_OBJECT(-1);
2573 CHECK_NULL(obj);
2574
2575 MAYBE_POST_FIELD_ACCESS(obj);
2576
2577 SET_STACK_INT(obj->byte_field(field_offset), -1);
2578 UPDATE_PC_AND_CONTINUE(3);
2579 }
2580
2581 CASE(_fast_cgetfield): {
2582 u2 index = Bytes::get_native_u2(pc+1);
2583 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2584 int field_offset = entry->field_offset();
2585
2586 oop obj = STACK_OBJECT(-1);
2587 CHECK_NULL(obj);
2588
2589 MAYBE_POST_FIELD_ACCESS(obj);
2590
2591 SET_STACK_INT(obj->char_field(field_offset), -1);
2592 UPDATE_PC_AND_CONTINUE(3);
2593 }
2594
2595 CASE(_fast_dgetfield): {
2596 u2 index = Bytes::get_native_u2(pc+1);
2597 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2598 int field_offset = entry->field_offset();
2599
2600 oop obj = STACK_OBJECT(-1);
2601 CHECK_NULL(obj);
2602
2603 MAYBE_POST_FIELD_ACCESS(obj);
2604
2605 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
2606 MORE_STACK(1);
2607 UPDATE_PC_AND_CONTINUE(3);
2608 }
2609
2610 CASE(_fast_fgetfield): {
2611 u2 index = Bytes::get_native_u2(pc+1);
2612 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2613 int field_offset = entry->field_offset();
2614
2615 oop obj = STACK_OBJECT(-1);
2616 CHECK_NULL(obj);
2617
2618 MAYBE_POST_FIELD_ACCESS(obj);
2619
2620 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
2621 UPDATE_PC_AND_CONTINUE(3);
2622 }
2623
2624 CASE(_fast_igetfield): {
2625 u2 index = Bytes::get_native_u2(pc+1);
2626 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2627 int field_offset = entry->field_offset();
2628
2629 oop obj = STACK_OBJECT(-1);
2630 CHECK_NULL(obj);
2631
2632 MAYBE_POST_FIELD_ACCESS(obj);
2633
2634 SET_STACK_INT(obj->int_field(field_offset), -1);
2635 UPDATE_PC_AND_CONTINUE(3);
2636 }
2637
2638 CASE(_fast_lgetfield): {
2639 u2 index = Bytes::get_native_u2(pc+1);
2640 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2641 int field_offset = entry->field_offset();
2642
2643 oop obj = STACK_OBJECT(-1);
2644 CHECK_NULL(obj);
2645
2646 MAYBE_POST_FIELD_ACCESS(obj);
2647
2648 SET_STACK_LONG(obj->long_field(field_offset), 0);
2649 MORE_STACK(1);
2650 UPDATE_PC_AND_CONTINUE(3);
2651 }
2652
2653 CASE(_fast_sgetfield): {
2654 u2 index = Bytes::get_native_u2(pc+1);
2655 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2656 int field_offset = entry->field_offset();
2657
2658 oop obj = STACK_OBJECT(-1);
2659 CHECK_NULL(obj);
2660
2661 MAYBE_POST_FIELD_ACCESS(obj);
2662
2663 SET_STACK_INT(obj->short_field(field_offset), -1);
2664 UPDATE_PC_AND_CONTINUE(3);
2665 }
2666
2667 CASE(_fast_aputfield): {
2668 u2 index = Bytes::get_native_u2(pc+1);
2669 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2670
2671 oop obj = STACK_OBJECT(-2);
2672 CHECK_NULL(obj);
2673
2674 MAYBE_POST_FIELD_MODIFICATION(obj);
2675
2676 int field_offset = entry->field_offset();
2677 oop val = STACK_OBJECT(-1);
2678
2679 if (entry->is_null_free_inline_type()) {
2680 CHECK_NULL_MSG(val, "Value is null");
2681 }
2682
2683 if (entry->is_flat()) {
2684 CALL_VM(InterpreterRuntime::write_flat_field(THREAD, obj, val, entry), handle_exception);
2685 } else {
2686 obj->obj_field_put(field_offset, val);
2687 }
2688
2689 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2690 }
2691
2692 CASE(_fast_bputfield): {
2693 u2 index = Bytes::get_native_u2(pc+1);
2694 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2695
2696 oop obj = STACK_OBJECT(-2);
2697 CHECK_NULL(obj);
2698
2699 MAYBE_POST_FIELD_MODIFICATION(obj);
2700
2701 int field_offset = entry->field_offset();
2702 obj->byte_field_put(field_offset, STACK_INT(-1));
2703
2704 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2705 }
2706
2707 CASE(_fast_zputfield): {
2708 u2 index = Bytes::get_native_u2(pc+1);
2709 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2710
2711 oop obj = STACK_OBJECT(-2);
2712 CHECK_NULL(obj);
2713
2714 MAYBE_POST_FIELD_MODIFICATION(obj);
2715
2716 int field_offset = entry->field_offset();
2717 obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
2718
2719 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2720 }
2721
2722 CASE(_fast_cputfield): {
2723 u2 index = Bytes::get_native_u2(pc+1);
2724 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2725
2726 oop obj = STACK_OBJECT(-2);
2727 CHECK_NULL(obj);
2728
2729 MAYBE_POST_FIELD_MODIFICATION(obj);
2730
2731 int field_offset = entry->field_offset();
2732 obj->char_field_put(field_offset, STACK_INT(-1));
2733
2734 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2735 }
2736
2737 CASE(_fast_dputfield): {
2738 u2 index = Bytes::get_native_u2(pc+1);
2739 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2740
2741 oop obj = STACK_OBJECT(-3);
2742 CHECK_NULL(obj);
2743
2744 MAYBE_POST_FIELD_MODIFICATION(obj);
2745
2746 int field_offset = entry->field_offset();
2747 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
2748
2749 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
2750 }
2751
2752 CASE(_fast_fputfield): {
2753 u2 index = Bytes::get_native_u2(pc+1);
2754 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2755
2756 oop obj = STACK_OBJECT(-2);
2757 CHECK_NULL(obj);
2758
2759 MAYBE_POST_FIELD_MODIFICATION(obj);
2760
2761 int field_offset = entry->field_offset();
2762 obj->float_field_put(field_offset, STACK_FLOAT(-1));
2763
2764 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2765 }
2766
2767 CASE(_fast_iputfield): {
2768 u2 index = Bytes::get_native_u2(pc+1);
2769 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2770
2771 oop obj = STACK_OBJECT(-2);
2772 CHECK_NULL(obj);
2773
2774 MAYBE_POST_FIELD_MODIFICATION(obj);
2775
2776 int field_offset = entry->field_offset();
2777 obj->int_field_put(field_offset, STACK_INT(-1));
2778
2779 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2780 }
2781
2782 CASE(_fast_lputfield): {
2783 u2 index = Bytes::get_native_u2(pc+1);
2784 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2785
2786 oop obj = STACK_OBJECT(-3);
2787 CHECK_NULL(obj);
2788
2789 MAYBE_POST_FIELD_MODIFICATION(obj);
2790
2791 int field_offset = entry->field_offset();
2792 obj->long_field_put(field_offset, STACK_LONG(-1));
2793
2794 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
2795 }
2796
2797 CASE(_fast_sputfield): {
2798 u2 index = Bytes::get_native_u2(pc+1);
2799 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2800
2801 oop obj = STACK_OBJECT(-2);
2802 CHECK_NULL(obj);
2803
2804 MAYBE_POST_FIELD_MODIFICATION(obj);
2805
2806 int field_offset = entry->field_offset();
2807 obj->short_field_put(field_offset, STACK_INT(-1));
2808
2809 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
2810 }
2811
2812 CASE(_fast_aload_0): {
2813 oop obj = LOCALS_OBJECT(0);
2814 VERIFY_OOP(obj);
2815 SET_STACK_OBJECT(obj, 0);
2816 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
2817 }
2818
2819 CASE(_fast_aaccess_0): {
2820 u2 index = Bytes::get_native_u2(pc+2);
2821 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2822 int field_offset = entry->field_offset();
2823
2824 oop obj = LOCALS_OBJECT(0);
2825 CHECK_NULL(obj);
2826 VERIFY_OOP(obj);
2827
2828 MAYBE_POST_FIELD_ACCESS(obj);
2829
2830 oop val;
2831 if (entry->is_flat()) {
2832 CALL_VM(InterpreterRuntime::read_flat_field(THREAD, obj, entry), handle_exception);
2833 val = THREAD->vm_result_oop();
2834 THREAD->set_vm_result_oop(nullptr);
2835 } else {
2836 val = obj->obj_field(field_offset);
2837 }
2838
2839 VERIFY_OOP(val);
2840 SET_STACK_OBJECT(val, 0);
2841 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
2842 }
2843
2844 CASE(_fast_iaccess_0): {
2845 u2 index = Bytes::get_native_u2(pc+2);
2846 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2847 int field_offset = entry->field_offset();
2848
2849 oop obj = LOCALS_OBJECT(0);
2850 CHECK_NULL(obj);
2851 VERIFY_OOP(obj);
2852
2853 MAYBE_POST_FIELD_ACCESS(obj);
2854
2855 SET_STACK_INT(obj->int_field(field_offset), 0);
2856 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
2857 }
2858
2859 CASE(_fast_faccess_0): {
2860 u2 index = Bytes::get_native_u2(pc+2);
2861 ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
2862 int field_offset = entry->field_offset();
2863
2864 oop obj = LOCALS_OBJECT(0);
2865 CHECK_NULL(obj);
2866 VERIFY_OOP(obj);
2867
2868 MAYBE_POST_FIELD_ACCESS(obj);
2869
2870 SET_STACK_FLOAT(obj->float_field(field_offset), 0);
2871 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
2872 }
2873
      CASE(_fast_invokevfinal): {
        // Rewritten form of invokevirtual for vfinal targets (see the rewrite
        // in the _invokevirtual case): the callee is known statically, so no
        // vtable dispatch is needed — only the receiver null check.
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);

        assert(entry->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting");

        istate->set_msg(call_method);

        // Receiver sits below the arguments; null receiver must still throw.
        CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
        Method* callee = entry->method();
        istate->set_callee(callee);
        if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
          // Debugger stepping: force the interpreted entry point.
          istate->set_callee_entry_point(callee->interpreter_entry());
        } else {
          istate->set_callee_entry_point(callee->from_interpreted_entry());
        }
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0);
      }
2893
2894 DEFAULT:
2895 fatal("Unimplemented opcode %d = %s", opcode,
2896 Bytecodes::name((Bytecodes::Code)opcode));
2897 goto finish;
2898
2899 } /* switch(opc) */
2900
2901
2902 #ifdef USELABELS
2903 check_for_exception:
2904 #endif
2905 {
2906 if (!THREAD->has_pending_exception()) {
2907 CONTINUE;
2908 }
2909 /* We will be gcsafe soon, so flush our state. */
2910 DECACHE_PC();
2911 goto handle_exception;
2912 }
2913 do_continue: ;
2914
2915 } /* while (1) interpreter loop */
2916
2917
2918 // An exception exists in the thread state see whether this activation can handle it
2919 handle_exception: {
2920
2921 HandleMarkCleaner __hmc(THREAD);
2922 Handle except_oop(THREAD, THREAD->pending_exception());
2923 // Prevent any subsequent HandleMarkCleaner in the VM
2924 // from freeing the except_oop handle.
2925 HandleMark __hm(THREAD);
2926
2927 THREAD->clear_pending_exception();
2928 assert(except_oop() != nullptr, "No exception to process");
2929 intptr_t continuation_bci;
2930 // expression stack is emptied
2931 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2932 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
2933 handle_exception);
2934
2935 except_oop = Handle(THREAD, THREAD->vm_result_oop());
2936 THREAD->set_vm_result_oop(nullptr);
2937 if (continuation_bci >= 0) {
2938 // Place exception on top of stack
2939 SET_STACK_OBJECT(except_oop(), 0);
2940 MORE_STACK(1);
2941 pc = METHOD->code_base() + continuation_bci;
2942 if (log_is_enabled(Info, exceptions)) {
2943 ResourceMark rm(THREAD);
2944 stringStream tempst;
2945 tempst.print("interpreter method <%s>\n"
2946 " at bci %d, continuing at %d for thread " INTPTR_FORMAT,
2947 METHOD->print_value_string(),
2948 (int)(istate->bcp() - METHOD->code_base()),
2949 (int)continuation_bci, p2i(THREAD));
2950 Exceptions::log_exception(except_oop, tempst.as_string());
2951 }
2952 // for AbortVMOnException flag
2953 Exceptions::debug_check_abort(except_oop);
2954 goto run;
2955 }
2956 if (log_is_enabled(Info, exceptions)) {
2957 ResourceMark rm;
2958 stringStream tempst;
2959 tempst.print("interpreter method <%s>\n"
2960 " at bci %d, unwinding for thread " INTPTR_FORMAT,
2961 METHOD->print_value_string(),
2962 (int)(istate->bcp() - METHOD->code_base()),
2963 p2i(THREAD));
2964 Exceptions::log_exception(except_oop, tempst.as_string());
2965 }
2966 // for AbortVMOnException flag
2967 Exceptions::debug_check_abort(except_oop);
2968
2969 // No handler in this activation, unwind and try again
2970 THREAD->set_pending_exception(except_oop(), nullptr, 0);
2971 goto handle_return;
2972 } // handle_exception:
2973
2974 // Return from an interpreter invocation with the result of the interpretation
2975 // on the top of the Java Stack (or a pending exception)
2976
2977 handle_Pop_Frame: {
2978
2979 // We don't really do anything special here except we must be aware
2980 // that we can get here without ever locking the method (if sync).
2981 // Also we skip the notification of the exit.
2982
2983 istate->set_msg(popping_frame);
2984 // Clear pending so while the pop is in process
2985 // we don't start another one if a call_vm is done.
2986 THREAD->clear_popframe_condition();
// Let interpreter (only) see that we're in the process of popping a frame
2988 THREAD->set_pop_frame_in_process();
2989
2990 goto handle_return;
2991
2992 } // handle_Pop_Frame
2993
2994 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2995 // given by the invoker of the early return.
2996 handle_Early_Return: {
2997
2998 istate->set_msg(early_return);
2999
3000 // Clear expression stack.
3001 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
3002
3003 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
3004
3005 // Push the value to be returned.
3006 switch (istate->method()->result_type()) {
3007 case T_BOOLEAN:
3008 case T_SHORT:
3009 case T_BYTE:
3010 case T_CHAR:
3011 case T_INT:
3012 SET_STACK_INT(ts->earlyret_value().i, 0);
3013 MORE_STACK(1);
3014 break;
3015 case T_LONG:
3016 SET_STACK_LONG(ts->earlyret_value().j, 1);
3017 MORE_STACK(2);
3018 break;
3019 case T_FLOAT:
3020 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
3021 MORE_STACK(1);
3022 break;
3023 case T_DOUBLE:
3024 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
3025 MORE_STACK(2);
3026 break;
3027 case T_ARRAY:
3028 case T_OBJECT:
3029 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
3030 MORE_STACK(1);
3031 break;
3032 default:
3033 ShouldNotReachHere();
3034 }
3035
3036 ts->clr_earlyret_value();
3037 ts->set_earlyret_oop(nullptr);
3038 ts->clr_earlyret_pending();
3039
3040 // Fall through to handle_return.
3041
3042 } // handle_Early_Return
3043
3044 handle_return: {
3045 // A storestore barrier is required to order initialization of
3046 // final fields with publishing the reference to the object that
3047 // holds the field. Without the barrier the value of final fields
3048 // can be observed to change.
3049 OrderAccess::storestore();
3050
3051 DECACHE_STATE();
3052
3053 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
3054 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
3055 Handle original_exception(THREAD, THREAD->pending_exception());
3056 Handle illegal_state_oop(THREAD, nullptr);
3057
3058 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
3059 // in any following VM entries from freeing our live handles, but illegal_state_oop
3060 // isn't really allocated yet and so doesn't become live until later and
3061 // in unpredictable places. Instead we must protect the places where we enter the
3062 // VM. It would be much simpler (and safer) if we could allocate a real handle with
// a null oop in it and then overwrite the oop later as needed. This
// unfortunately isn't possible.
3065
3066 if (THREAD->has_pending_exception()) {
3067 THREAD->clear_pending_exception();
3068 }
3069
3070 //
3071 // As far as we are concerned we have returned. If we have a pending exception
3072 // that will be returned as this invocation's result. However if we get any
3073 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
3074 // will be our final result (i.e. monitor exception trumps a pending exception).
3075 //
3076
3077 // If we never locked the method (or really passed the point where we would have),
3078 // there is no need to unlock it (or look for other monitors), since that
3079 // could not have happened.
3080
3081 if (THREAD->do_not_unlock_if_synchronized()) {
3082
3083 // Never locked, reset the flag now because obviously any caller must
3084 // have passed their point of locking for us to have gotten here.
3085
3086 THREAD->set_do_not_unlock_if_synchronized(false);
3087 } else {
3088 // At this point we consider that we have returned. We now check that the
3089 // locks were properly block structured. If we find that they were not
3090 // used properly we will return with an illegal monitor exception.
3091 // The exception is checked by the caller not the callee since this
3092 // checking is considered to be part of the invocation and therefore
3093 // in the callers scope (JVM spec 8.13).
3094 //
3095 // Another weird thing to watch for is if the method was locked
3096 // recursively and then not exited properly. This means we must
3097 // examine all the entries in reverse time(and stack) order and
3098 // unlock as we find them. If we find the method monitor before
3099 // we are at the initial entry then we should throw an exception.
3100 // It is not clear the template based interpreter does this
3101 // correctly
3102
3103 BasicObjectLock* base = istate->monitor_base();
3104 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3105 bool method_unlock_needed = METHOD->is_synchronized();
// We know the initial monitor was used for the method, so don't check that
// slot in the loop
3108 if (method_unlock_needed) base--;
3109
3110 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
3111 while (end < base) {
3112 oop lockee = end->obj();
3113 if (lockee != nullptr) {
3114 InterpreterRuntime::monitorexit(end);
3115
3116 // One error is plenty
3117 if (illegal_state_oop() == nullptr && !suppress_error) {
3118 {
3119 // Prevent any HandleMarkCleaner from freeing our live handles
3120 HandleMark __hm(THREAD);
3121 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3122 }
3123 assert(THREAD->has_pending_exception(), "Lost our exception!");
3124 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3125 THREAD->clear_pending_exception();
3126 }
3127 }
3128 end++;
3129 }
3130 // Unlock the method if needed
3131 if (method_unlock_needed) {
3132 if (base->obj() == nullptr) {
3133 // The method is already unlocked this is not good.
3134 if (illegal_state_oop() == nullptr && !suppress_error) {
3135 {
3136 // Prevent any HandleMarkCleaner from freeing our live handles
3137 HandleMark __hm(THREAD);
3138 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3139 }
3140 assert(THREAD->has_pending_exception(), "Lost our exception!");
3141 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3142 THREAD->clear_pending_exception();
3143 }
3144 } else {
3145 //
3146 // The initial monitor is always used for the method
3147 // However if that slot is no longer the oop for the method it was unlocked
3148 // and reused by something that wasn't unlocked!
3149 //
3150 // deopt can come in with rcvr dead because c2 knows
3151 // its value is preserved in the monitor. So we can't use locals[0] at all
3152 // and must use first monitor slot.
3153 //
3154 oop rcvr = base->obj();
3155 if (rcvr == nullptr) {
3156 if (!suppress_error) {
3157 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
3158 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3159 THREAD->clear_pending_exception();
3160 }
3161 } else {
3162 InterpreterRuntime::monitorexit(base);
3163 if (THREAD->has_pending_exception()) {
3164 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3165 THREAD->clear_pending_exception();
3166 }
3167 }
3168 }
3169 }
3170 }
3171 // Clear the do_not_unlock flag now.
3172 THREAD->set_do_not_unlock_if_synchronized(false);
3173
3174 //
3175 // Notify jvmti/jvmdi
3176 //
3177 // NOTE: we do not notify a method_exit if we have a pending exception,
3178 // including an exception we generate for unlocking checks. In the former
3179 // case, JVMDI has already been notified by our call for the exception handler
3180 // and in both cases as far as JVMDI is concerned we have already returned.
3181 // If we notify it again JVMDI will be all confused about how many frames
3182 // are still on the stack (4340444).
3183 //
3184 // NOTE Further! It turns out the JVMTI spec in fact expects to see
3185 // method_exit events whenever we leave an activation unless it was done
3186 // for popframe. This is nothing like jvmdi. However we are passing the
3187 // tests at the moment (apparently because they are jvmdi based) so rather
3188 // than change this code and possibly fail tests we will leave it alone
3189 // (with this note) in anticipation of changing the vm and the tests
3190 // simultaneously.
3191
3192 suppress_exit_event = suppress_exit_event || illegal_state_oop() != nullptr;
3193
3194 // Whenever JVMTI puts a thread in interp_only_mode, method
3195 // entry/exit events are sent for that thread to track stack depth.
3196
3197 if (JVMTI_ENABLED && !suppress_exit_event && THREAD->is_interp_only_mode()) {
3198 // Prevent any HandleMarkCleaner from freeing our live handles
3199 HandleMark __hm(THREAD);
3200 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3201 }
3202
3203 //
3204 // See if we are returning any exception
3205 // A pending exception that was pending prior to a possible popping frame
3206 // overrides the popping frame.
3207 //
3208 assert(!suppress_error || (suppress_error && illegal_state_oop() == nullptr), "Error was not suppressed");
3209 if (illegal_state_oop() != nullptr || original_exception() != nullptr) {
3210 // Inform the frame manager we have no result.
3211 istate->set_msg(throwing_exception);
3212 if (illegal_state_oop() != nullptr)
3213 THREAD->set_pending_exception(illegal_state_oop(), nullptr, 0);
3214 else
3215 THREAD->set_pending_exception(original_exception(), nullptr, 0);
3216 UPDATE_PC_AND_RETURN(0);
3217 }
3218
3219 if (istate->msg() == popping_frame) {
3220 // Make it simpler on the assembly code and set the message for the frame pop.
3221 // returns
3222 if (istate->prev() == nullptr) {
3223 // We must be returning to a deoptimized frame (because popframe only happens between
3224 // two interpreted frames). We need to save the current arguments in C heap so that
3225 // the deoptimized frame when it restarts can copy the arguments to its expression
3226 // stack and re-execute the call. We also have to notify deoptimization that this
3227 // has occurred and to pick the preserved args copy them to the deoptimized frame's
3228 // java expression stack. Yuck.
3229 //
3230 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3231 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3232 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3233 }
3234 } else {
3235 istate->set_msg(return_from_method);
3236 }
3237
3238 // Normal return
3239 // Advance the pc and return to frame manager
3240 UPDATE_PC_AND_RETURN(1);
3241 } /* handle_return: */
3242
3243 // This is really a fatal error return
3244
3245 finish:
3246 DECACHE_TOS();
3247 DECACHE_PC();
3248
3249 return;
3250 }
3251
3252 // This constructor should only be used to construct the object to signal
3253 // interpreter initialization. All other instances should be created by
3254 // the frame manager.
3255 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3256 if (msg != initialize) ShouldNotReachHere();
3257 _msg = msg;
3258 _self_link = this;
3259 _prev_link = nullptr;
3260 }
3261
3262 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
3263 intptr_t* locals, int locals_offset) {
3264 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3265 locals[Interpreter::local_index_at(-locals_offset)] = value;
3266 }
3267
3268 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3269 int to_offset) {
3270 tos[Interpreter::expr_index_at(-to_offset)] =
3271 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3272 }
3273
3274 void BytecodeInterpreter::dup(intptr_t *tos) {
3275 copy_stack_slot(tos, -1, 0);
3276 }
3277
3278 void BytecodeInterpreter::dup2(intptr_t *tos) {
3279 copy_stack_slot(tos, -2, 0);
3280 copy_stack_slot(tos, -1, 1);
3281 }
3282
3283 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
3284 /* insert top word two down */
3285 copy_stack_slot(tos, -1, 0);
3286 copy_stack_slot(tos, -2, -1);
3287 copy_stack_slot(tos, 0, -2);
3288 }
3289
3290 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
3291 /* insert top word three down */
3292 copy_stack_slot(tos, -1, 0);
3293 copy_stack_slot(tos, -2, -1);
3294 copy_stack_slot(tos, -3, -2);
3295 copy_stack_slot(tos, 0, -3);
3296 }
3297 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
3298 /* insert top 2 slots three down */
3299 copy_stack_slot(tos, -1, 1);
3300 copy_stack_slot(tos, -2, 0);
3301 copy_stack_slot(tos, -3, -1);
3302 copy_stack_slot(tos, 1, -2);
3303 copy_stack_slot(tos, 0, -3);
3304 }
3305 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
3306 /* insert top 2 slots four down */
3307 copy_stack_slot(tos, -1, 1);
3308 copy_stack_slot(tos, -2, 0);
3309 copy_stack_slot(tos, -3, -1);
3310 copy_stack_slot(tos, -4, -2);
3311 copy_stack_slot(tos, 1, -3);
3312 copy_stack_slot(tos, 0, -4);
3313 }
3314
3315
3316 void BytecodeInterpreter::swap(intptr_t *tos) {
3317 // swap top two elements
3318 intptr_t val = tos[Interpreter::expr_index_at(1)];
3319 // Copy -2 entry to -1
3320 copy_stack_slot(tos, -2, -1);
3321 // Store saved -1 entry into -2
3322 tos[Interpreter::expr_index_at(2)] = val;
3323 }
3324 // --------------------------------------------------------------------------------
3325 // Non-product code
3326 #ifndef PRODUCT
3327
3328 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3329 switch (msg) {
3330 case BytecodeInterpreter::no_request: return("no_request");
3331 case BytecodeInterpreter::initialize: return("initialize");
3332 // status message to C++ interpreter
3333 case BytecodeInterpreter::method_entry: return("method_entry");
3334 case BytecodeInterpreter::method_resume: return("method_resume");
3335 case BytecodeInterpreter::got_monitors: return("got_monitors");
3336 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
3337 // requests to frame manager from C++ interpreter
3338 case BytecodeInterpreter::call_method: return("call_method");
3339 case BytecodeInterpreter::return_from_method: return("return_from_method");
3340 case BytecodeInterpreter::more_monitors: return("more_monitors");
3341 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
3342 case BytecodeInterpreter::popping_frame: return("popping_frame");
3343 case BytecodeInterpreter::do_osr: return("do_osr");
3344 // deopt
3345 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
3346 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
3347 default: return("BAD MSG");
3348 }
3349 }
void
BytecodeInterpreter::print() {
  // Debugging aid (non-product builds only): dump the fields of this
  // interpreter activation record to the tty.
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    // Scope a ResourceMark around the temporary string built by
    // name_and_sig_as_C_string() so it is released promptly.
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  // Translate the message enum to text via C_msg().
  tty->print_cr("msg: %s", C_msg(this->_msg));
  // The _result union: only the variant matching _msg is meaningful.
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}
3375
3376 extern "C" {
3377 void PI(uintptr_t arg) {
3378 ((BytecodeInterpreter*)arg)->print();
3379 }
3380 }
3381 #endif // PRODUCT