1 /*
2 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/cdsConfig.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "interpreter/bytecodeHistogram.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "interpreter/zero/bytecodeInterpreter.inline.hpp"
35 #include "jvm_io.h"
36 #include "logging/log.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "memory/universe.hpp"
39 #include "oops/constantPool.inline.hpp"
40 #include "oops/cpCache.inline.hpp"
41 #include "oops/instanceKlass.inline.hpp"
42 #include "oops/klass.inline.hpp"
43 #include "oops/method.inline.hpp"
44 #include "oops/methodCounters.hpp"
45 #include "oops/objArrayKlass.hpp"
46 #include "oops/objArrayOop.inline.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "oops/resolvedFieldEntry.hpp"
49 #include "oops/resolvedIndyEntry.hpp"
50 #include "oops/resolvedMethodEntry.hpp"
51 #include "oops/typeArrayOop.inline.hpp"
52 #include "prims/jvmtiExport.hpp"
53 #include "prims/jvmtiThreadState.hpp"
54 #include "runtime/atomicAccess.hpp"
55 #include "runtime/basicLock.inline.hpp"
56 #include "runtime/frame.inline.hpp"
57 #include "runtime/globals.hpp"
58 #include "runtime/handles.inline.hpp"
59 #include "runtime/interfaceSupport.inline.hpp"
60 #include "runtime/orderAccess.hpp"
61 #include "runtime/sharedRuntime.hpp"
62 #include "utilities/debug.hpp"
63 #include "utilities/exceptions.hpp"
64 #include "utilities/globalDefinitions.hpp"
65 #include "utilities/macros.hpp"
66
67 /*
68 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
70 * gives us the opportunity to have the instructions that calculate the
71 * next opcode to jump to be intermixed with the rest of the instructions
72 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
73 */
74 #undef USELABELS
75 #ifdef __GNUC__
76 /*
77 ASSERT signifies debugging. It is much easier to step thru bytecodes if we
78 don't use the computed goto approach.
79 */
80 #ifndef ASSERT
81 #define USELABELS
82 #endif
83 #endif
84
85 #undef CASE
86 #ifdef USELABELS
87 #define CASE(opcode) opc ## opcode
88 #define DEFAULT opc_default
89 #else
90 #define CASE(opcode) case Bytecodes:: opcode
91 #define DEFAULT default
92 #endif
93
94 /*
95 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
97 * the top of the while loop handle it. This provides a better opportunity
98 * for instruction scheduling. Some compilers just do this prefetch
99 * automatically. Some actually end up with worse performance if you
100 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
101 */
102 #undef PREFETCH_OPCCODE
103 #define PREFETCH_OPCCODE
104
// Deliberately-empty VM entry point, invoked by RETURN_SAFEPOINT (below)
// when SafepointMechanism::should_process() is true. The JRT_ENTRY/JRT_END
// wrapper (see interfaceSupport.inline.hpp) performs the Java->VM thread
// state transition, so safepoint/handshake work happens as a side effect
// of simply making this call; no body is required.
JRT_ENTRY(void, at_safepoint(JavaThread* current)) {}
JRT_END
107
108 /*
 Interpreter safepoint: it is expected that the interpreter will have no
 handles of its own creation live at an interpreter safepoint. Therefore we
111 run a HandleMarkCleaner and trash all handles allocated in the call chain
112 since the JavaCalls::call_helper invocation that initiated the chain.
113 There really shouldn't be any handles remaining to trash but this is cheap
114 in relation to a safepoint.
115 */
116 #define RETURN_SAFEPOINT \
117 if (SafepointMechanism::should_process(THREAD)) { \
118 CALL_VM(at_safepoint(THREAD), handle_exception); \
119 }
120
121 /*
122 * VM_JAVA_ERROR - Macro for throwing a java exception from
123 * the interpreter loop. Should really be a CALL_VM but there
124 * is no entry point to do the transition to vm so we just
125 * do it by hand here.
126 */
127 #define VM_JAVA_ERROR_NO_JUMP(name, msg) \
128 DECACHE_STATE(); \
129 SET_LAST_JAVA_FRAME(); \
130 { \
131 ThreadInVMfromJava trans(THREAD); \
132 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
133 } \
134 RESET_LAST_JAVA_FRAME(); \
135 CACHE_STATE();
136
137 // Normal throw of a java error.
138 #define VM_JAVA_ERROR(name, msg) \
139 VM_JAVA_ERROR_NO_JUMP(name, msg) \
140 goto handle_exception;
141
142 #ifdef PRODUCT
143 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
144 #else
145 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
146 { \
147 if (PrintBytecodeHistogram) { \
148 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
149 } \
150 if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) { \
151 BytecodeCounter::_counter_value++; \
152 if (StopInterpreterAt == BytecodeCounter::_counter_value) { \
153 os::breakpoint(); \
154 } \
155 if (TraceBytecodes) { \
156 CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \
157 topOfStack[Interpreter::expr_index_at(1)], \
158 topOfStack[Interpreter::expr_index_at(2)]), \
159 handle_exception); \
160 } \
161 } \
162 }
163 #endif
164
165 #undef DEBUGGER_SINGLE_STEP_NOTIFY
166 #if INCLUDE_JVMTI
167 /* NOTE: (kbr) This macro must be called AFTER the PC has been
168 incremented. JvmtiExport::at_single_stepping_point() may cause a
169 breakpoint opcode to get inserted at the current PC to allow the
170 debugger to coalesce single-step events.
171
172 As a result if we call at_single_stepping_point() we refetch opcode
173 to get the current opcode. This will override any other prefetching
174 that might have occurred.
175 */
176 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
177 { \
178 if (JVMTI_ENABLED && JvmtiExport::should_post_single_step()) { \
179 DECACHE_STATE(); \
180 SET_LAST_JAVA_FRAME(); \
181 ThreadInVMfromJava trans(THREAD); \
182 JvmtiExport::at_single_stepping_point(THREAD, \
183 istate->method(), \
184 pc); \
185 RESET_LAST_JAVA_FRAME(); \
186 CACHE_STATE(); \
187 if (THREAD->has_pending_popframe() && \
188 !THREAD->pop_frame_in_process()) { \
189 goto handle_Pop_Frame; \
190 } \
191 if (THREAD->jvmti_thread_state() && \
192 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
193 goto handle_Early_Return; \
194 } \
195 opcode = *pc; \
196 } \
197 }
198 #else
199 #define DEBUGGER_SINGLE_STEP_NOTIFY()
200 #endif // INCLUDE_JVMTI
201
202 /*
203 * CONTINUE - Macro for executing the next opcode.
204 */
205 #undef CONTINUE
206 #ifdef USELABELS
207 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
209 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
210 #define CONTINUE { \
211 opcode = *pc; \
212 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
213 DEBUGGER_SINGLE_STEP_NOTIFY(); \
214 DISPATCH(opcode); \
215 }
216 #else
217 #ifdef PREFETCH_OPCCODE
218 #define CONTINUE { \
219 opcode = *pc; \
220 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
221 DEBUGGER_SINGLE_STEP_NOTIFY(); \
222 continue; \
223 }
224 #else
225 #define CONTINUE { \
226 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
227 DEBUGGER_SINGLE_STEP_NOTIFY(); \
228 continue; \
229 }
230 #endif
231 #endif
232
233
234 #define UPDATE_PC(opsize) {pc += opsize; }
235 /*
236 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
237 */
238 #undef UPDATE_PC_AND_TOS
239 #define UPDATE_PC_AND_TOS(opsize, stack) \
240 {pc += opsize; MORE_STACK(stack); }
241
242 /*
243 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
244 * and executing the next opcode. It's somewhat similar to the combination
245 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
246 */
247 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
248 #ifdef USELABELS
249 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
250 pc += opsize; opcode = *pc; MORE_STACK(stack); \
251 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
252 DEBUGGER_SINGLE_STEP_NOTIFY(); \
253 DISPATCH(opcode); \
254 }
255
256 #define UPDATE_PC_AND_CONTINUE(opsize) { \
257 pc += opsize; opcode = *pc; \
258 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
259 DEBUGGER_SINGLE_STEP_NOTIFY(); \
260 DISPATCH(opcode); \
261 }
262 #else
263 #ifdef PREFETCH_OPCCODE
264 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
265 pc += opsize; opcode = *pc; MORE_STACK(stack); \
266 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
267 DEBUGGER_SINGLE_STEP_NOTIFY(); \
268 goto do_continue; \
269 }
270
271 #define UPDATE_PC_AND_CONTINUE(opsize) { \
272 pc += opsize; opcode = *pc; \
273 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
274 DEBUGGER_SINGLE_STEP_NOTIFY(); \
275 goto do_continue; \
276 }
277 #else
278 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
279 pc += opsize; MORE_STACK(stack); \
280 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
281 DEBUGGER_SINGLE_STEP_NOTIFY(); \
282 goto do_continue; \
283 }
284
285 #define UPDATE_PC_AND_CONTINUE(opsize) { \
286 pc += opsize; \
287 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
288 DEBUGGER_SINGLE_STEP_NOTIFY(); \
289 goto do_continue; \
290 }
291 #endif /* PREFETCH_OPCCODE */
292 #endif /* USELABELS */
293
// About to call a new method: save the adjusted pc and return to the frame manager
295 #define UPDATE_PC_AND_RETURN(opsize) \
296 DECACHE_TOS(); \
297 istate->set_bcp(pc+opsize); \
298 return;
299
300 #define REWRITE_AT_PC(val) \
301 *pc = val;
302
303 #define METHOD istate->method()
304 #define GET_METHOD_COUNTERS(res)
305 #define DO_BACKEDGE_CHECKS(skip, branch_pc)
306
307 /*
308 * For those opcodes that need to have a GC point on a backwards branch
309 */
310
311 /*
312 * Macros for caching and flushing the interpreter state. Some local
313 * variables need to be flushed out to the frame before we do certain
314 * things (like pushing frames or becoming gc safe) and some need to
315 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
317 * optimal because we don't always need to cache or decache everything
318 * because some things we know are already cached or decached.
319 */
320 #undef DECACHE_TOS
321 #undef CACHE_TOS
322 #undef CACHE_PREV_TOS
323 #define DECACHE_TOS() istate->set_stack(topOfStack);
324
325 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
326
327 #undef DECACHE_PC
328 #undef CACHE_PC
329 #define DECACHE_PC() istate->set_bcp(pc);
330 #define CACHE_PC() pc = istate->bcp();
331 #define CACHE_CP() cp = istate->constants();
332 #define CACHE_LOCALS() locals = istate->locals();
333 #undef CACHE_FRAME
334 #define CACHE_FRAME()
335
336 // BCI() returns the current bytecode-index.
337 #undef BCI
338 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
339
340 /*
341 * CHECK_NULL - Macro for throwing a NullPointerException if the object
342 * passed is a null ref.
343 * On some architectures/platforms it should be possible to do this implicitly
344 */
345 #undef CHECK_NULL
346 #define CHECK_NULL(obj_) \
347 if ((obj_) == nullptr) { \
348 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), nullptr); \
349 } \
350 VERIFY_OOP(obj_)
351
352 #define VMdoubleConstZero() 0.0
353 #define VMdoubleConstOne() 1.0
354 #define VMlongConstZero() (max_jlong-max_jlong)
355 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
356
357 /*
358 * Alignment
359 */
360 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
361
362 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
363 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
364
365 // Reload interpreter state after calling the VM or a possible GC
366 #define CACHE_STATE() \
367 CACHE_TOS(); \
368 CACHE_PC(); \
369 CACHE_CP(); \
370 CACHE_LOCALS();
371
372 // Call the VM with last java frame only.
373 #define CALL_VM_NAKED_LJF(func) \
374 DECACHE_STATE(); \
375 SET_LAST_JAVA_FRAME(); \
376 func; \
377 RESET_LAST_JAVA_FRAME(); \
378 CACHE_STATE();
379
380 // Call the VM. Don't check for pending exceptions.
381 #define CALL_VM_NOCHECK(func) \
382 CALL_VM_NAKED_LJF(func) \
383 if (THREAD->has_pending_popframe() && \
384 !THREAD->pop_frame_in_process()) { \
385 goto handle_Pop_Frame; \
386 } \
387 if (THREAD->jvmti_thread_state() && \
388 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
389 goto handle_Early_Return; \
390 }
391
392 // Call the VM and check for pending exceptions
393 #define CALL_VM(func, label) { \
394 CALL_VM_NOCHECK(func); \
395 if (THREAD->has_pending_exception()) goto label; \
396 }
397
398 #define MAYBE_POST_FIELD_ACCESS(obj) { \
399 if (JVMTI_ENABLED) { \
400 int* count_addr; \
401 /* Check to see if a field modification watch has been set */ \
402 /* before we take the time to call into the VM. */ \
403 count_addr = (int*)JvmtiExport::get_field_access_count_addr(); \
404 if (*count_addr > 0) { \
405 oop target; \
406 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { \
407 target = nullptr; \
408 } else { \
409 target = obj; \
410 } \
411 CALL_VM(InterpreterRuntime::post_field_access(THREAD, \
412 target, entry), \
413 handle_exception); \
414 } \
415 } \
416 }
417
418 #define MAYBE_POST_FIELD_MODIFICATION(obj) { \
419 if (JVMTI_ENABLED) { \
420 int* count_addr; \
421 /* Check to see if a field modification watch has been set */ \
422 /* before we take the time to call into the VM. */ \
423 count_addr = (int*)JvmtiExport::get_field_modification_count_addr(); \
424 if (*count_addr > 0) { \
425 oop target; \
426 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { \
427 target = nullptr; \
428 } else { \
429 target = obj; \
430 } \
431 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, \
432 target, entry, \
433 (jvalue*)STACK_SLOT(-1)), \
434 handle_exception); \
435 } \
436 } \
437 }
438
439 static inline int fast_get_type(TosState tos) {
440 switch (tos) {
441 case ztos:
442 case btos: return Bytecodes::_fast_bgetfield;
443 case ctos: return Bytecodes::_fast_cgetfield;
444 case stos: return Bytecodes::_fast_sgetfield;
445 case itos: return Bytecodes::_fast_igetfield;
446 case ltos: return Bytecodes::_fast_lgetfield;
447 case ftos: return Bytecodes::_fast_fgetfield;
448 case dtos: return Bytecodes::_fast_dgetfield;
449 case atos: return Bytecodes::_fast_agetfield;
450 default:
451 ShouldNotReachHere();
452 return -1;
453 }
454 }
455
456 static inline int fast_put_type(TosState tos) {
457 switch (tos) {
458 case ztos: return Bytecodes::_fast_zputfield;
459 case btos: return Bytecodes::_fast_bputfield;
460 case ctos: return Bytecodes::_fast_cputfield;
461 case stos: return Bytecodes::_fast_sputfield;
462 case itos: return Bytecodes::_fast_iputfield;
463 case ltos: return Bytecodes::_fast_lputfield;
464 case ftos: return Bytecodes::_fast_fputfield;
465 case dtos: return Bytecodes::_fast_dputfield;
466 case atos: return Bytecodes::_fast_aputfield;
467 default:
468 ShouldNotReachHere();
469 return -1;
470 }
471 }
472
473 /*
474 * BytecodeInterpreter::run(interpreterState istate)
475 *
476 * The real deal. This is where byte codes actually get interpreted.
477 * Basically it's a big while loop that iterates until we return from
478 * the method passed in.
479 */
480
481 // Instantiate variants of the method for future linking.
482 template void BytecodeInterpreter::run<false, false>(interpreterState istate);
483 template void BytecodeInterpreter::run<false, true>(interpreterState istate);
484 template void BytecodeInterpreter::run< true, false>(interpreterState istate);
485 template void BytecodeInterpreter::run< true, true>(interpreterState istate);
486
487 template<bool JVMTI_ENABLED, bool REWRITE_BYTECODES>
488 void BytecodeInterpreter::run(interpreterState istate) {
489 intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
490 address pc = istate->bcp();
491 jubyte opcode;
492 intptr_t* locals = istate->locals();
493 ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
494 #ifdef LOTS_OF_REGS
495 JavaThread* THREAD = istate->thread();
496 #else
497 #undef THREAD
498 #define THREAD istate->thread()
499 #endif
500
501 #ifdef ASSERT
502 assert(labs(istate->stack_base() - istate->stack_limit()) == (istate->method()->max_stack() + 1),
503 "Bad stack limit");
504 /* QQQ this should be a stack method so we don't know actual direction */
505 assert(topOfStack >= istate->stack_limit() && topOfStack < istate->stack_base(),
506 "Stack top out of range");
507
508 // Verify linkages.
509 interpreterState l = istate;
510 do {
511 assert(l == l->_self_link, "bad link");
512 l = l->_prev_link;
513 } while (l != nullptr);
514 // Screwups with stack management usually cause us to overwrite istate
515 // save a copy so we can verify it.
516 interpreterState orig = istate;
517 #endif
518
519 #ifdef USELABELS
520 const static void* const opclabels_data[256] = {
521 /* 0x00 */ &&opc_nop, &&opc_aconst_null, &&opc_iconst_m1, &&opc_iconst_0,
522 /* 0x04 */ &&opc_iconst_1, &&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
523 /* 0x08 */ &&opc_iconst_5, &&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
524 /* 0x0C */ &&opc_fconst_1, &&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
525
526 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
527 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
528 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0, &&opc_iload_1,
529 /* 0x1C */ &&opc_iload_2, &&opc_iload_3, &&opc_lload_0, &&opc_lload_1,
530
531 /* 0x20 */ &&opc_lload_2, &&opc_lload_3, &&opc_fload_0, &&opc_fload_1,
532 /* 0x24 */ &&opc_fload_2, &&opc_fload_3, &&opc_dload_0, &&opc_dload_1,
533 /* 0x28 */ &&opc_dload_2, &&opc_dload_3, &&opc_aload_0, &&opc_aload_1,
534 /* 0x2C */ &&opc_aload_2, &&opc_aload_3, &&opc_iaload, &&opc_laload,
535
536 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
537 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
538 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
539 /* 0x3C */ &&opc_istore_1, &&opc_istore_2, &&opc_istore_3, &&opc_lstore_0,
540
541 /* 0x40 */ &&opc_lstore_1, &&opc_lstore_2, &&opc_lstore_3, &&opc_fstore_0,
542 /* 0x44 */ &&opc_fstore_1, &&opc_fstore_2, &&opc_fstore_3, &&opc_dstore_0,
543 /* 0x48 */ &&opc_dstore_1, &&opc_dstore_2, &&opc_dstore_3, &&opc_astore_0,
544 /* 0x4C */ &&opc_astore_1, &&opc_astore_2, &&opc_astore_3, &&opc_iastore,
545
546 /* 0x50 */ &&opc_lastore, &&opc_fastore, &&opc_dastore, &&opc_aastore,
547 /* 0x54 */ &&opc_bastore, &&opc_castore, &&opc_sastore, &&opc_pop,
548 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
549 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1, &&opc_dup2_x2, &&opc_swap,
550
551 /* 0x60 */ &&opc_iadd, &&opc_ladd, &&opc_fadd, &&opc_dadd,
552 /* 0x64 */ &&opc_isub, &&opc_lsub, &&opc_fsub, &&opc_dsub,
553 /* 0x68 */ &&opc_imul, &&opc_lmul, &&opc_fmul, &&opc_dmul,
554 /* 0x6C */ &&opc_idiv, &&opc_ldiv, &&opc_fdiv, &&opc_ddiv,
555
556 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem, &&opc_drem,
557 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg, &&opc_dneg,
558 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr, &&opc_lshr,
559 /* 0x7C */ &&opc_iushr, &&opc_lushr, &&opc_iand, &&opc_land,
560
561 /* 0x80 */ &&opc_ior, &&opc_lor, &&opc_ixor, &&opc_lxor,
562 /* 0x84 */ &&opc_iinc, &&opc_i2l, &&opc_i2f, &&opc_i2d,
563 /* 0x88 */ &&opc_l2i, &&opc_l2f, &&opc_l2d, &&opc_f2i,
564 /* 0x8C */ &&opc_f2l, &&opc_f2d, &&opc_d2i, &&opc_d2l,
565
566 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
567 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl, &&opc_fcmpg, &&opc_dcmpl,
568 /* 0x98 */ &&opc_dcmpg, &&opc_ifeq, &&opc_ifne, &&opc_iflt,
569 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
570
571 /* 0xA0 */ &&opc_if_icmpne, &&opc_if_icmplt, &&opc_if_icmpge, &&opc_if_icmpgt,
572 /* 0xA4 */ &&opc_if_icmple, &&opc_if_acmpeq, &&opc_if_acmpne, &&opc_goto,
573 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch, &&opc_lookupswitch,
574 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
575
576 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
577 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual, &&opc_invokespecial,
578 /* 0xB8 */ &&opc_invokestatic, &&opc_invokeinterface, &&opc_invokedynamic, &&opc_new,
579 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
580
581 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
582 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
583 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_fast_agetfield,
584 /* 0xCC */ &&opc_fast_bgetfield,&&opc_fast_cgetfield, &&opc_fast_dgetfield, &&opc_fast_fgetfield,
585
586 /* 0xD0 */ &&opc_fast_igetfield,&&opc_fast_lgetfield, &&opc_fast_sgetfield, &&opc_fast_aputfield,
587 /* 0xD4 */ &&opc_fast_bputfield,&&opc_fast_zputfield, &&opc_fast_cputfield, &&opc_fast_dputfield,
588 /* 0xD8 */ &&opc_fast_fputfield,&&opc_fast_iputfield, &&opc_fast_lputfield, &&opc_fast_sputfield,
589 /* 0xDC */ &&opc_fast_aload_0, &&opc_fast_iaccess_0, &&opc_fast_aaccess_0, &&opc_fast_faccess_0,
590
591 /* 0xE0 */ &&opc_fast_iload, &&opc_fast_iload2, &&opc_fast_icaload, &&opc_fast_invokevfinal,
592 /* 0xE4 */ &&opc_default, &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w,
593 /* 0xE8 */ &&opc_return_register_finalizer,
594 &&opc_invokehandle, &&opc_nofast_getfield,&&opc_nofast_putfield,
595 /* 0xEC */ &&opc_nofast_aload_0,&&opc_nofast_iload, &&opc_default, &&opc_default,
596
597 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
598 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
599 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
600 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
601 };
602 uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
603 #endif /* USELABELS */
604
605 switch (istate->msg()) {
606 case initialize: {
607 ShouldNotCallThis();
608 return;
609 }
610 case method_entry: {
611 THREAD->set_do_not_unlock_if_synchronized(true);
612
613 // Lock method if synchronized.
614 if (METHOD->is_synchronized()) {
615 // oop rcvr = locals[0].j.r;
616 oop rcvr;
617 if (METHOD->is_static()) {
618 rcvr = METHOD->constants()->pool_holder()->java_mirror();
619 } else {
620 rcvr = LOCALS_OBJECT(0);
621 VERIFY_OOP(rcvr);
622 }
623
624 // The initial monitor is ours for the taking.
625 BasicObjectLock* mon = &istate->monitor_base()[-1];
626 mon->set_obj(rcvr);
627 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
628 }
629 THREAD->set_do_not_unlock_if_synchronized(false);
630
631 // Notify jvmti.
632 // Whenever JVMTI puts a thread in interp_only_mode, method
633 // entry/exit events are sent for that thread to track stack depth.
634 if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
635 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
636 handle_exception);
637 }
638
639 goto run;
640 }
641
642 case popping_frame: {
643 // returned from a java call to pop the frame, restart the call
644 // clear the message so we don't confuse ourselves later
645 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
646 istate->set_msg(no_request);
647 THREAD->clr_pop_frame_in_process();
648 goto run;
649 }
650
651 case method_resume: {
652 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
653 // resume
654 os::breakpoint();
655 }
656 // returned from a java call, continue executing.
657 if (THREAD->has_pending_popframe() && !THREAD->pop_frame_in_process()) {
658 goto handle_Pop_Frame;
659 }
660 if (THREAD->jvmti_thread_state() &&
661 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
662 goto handle_Early_Return;
663 }
664
665 if (THREAD->has_pending_exception()) goto handle_exception;
666 // Update the pc by the saved amount of the invoke bytecode size
667 UPDATE_PC(istate->bcp_advance());
668 goto run;
669 }
670
671 case deopt_resume2: {
672 // Returned from an opcode that will reexecute. Deopt was
673 // a result of a PopFrame request.
674 //
675 goto run;
676 }
677
678 case deopt_resume: {
679 // Returned from an opcode that has completed. The stack has
680 // the result all we need to do is skip across the bytecode
681 // and continue (assuming there is no exception pending)
682 //
683 // compute continuation length
684 //
685 // Note: it is possible to deopt at a return_register_finalizer opcode
686 // because this requires entering the vm to do the registering. While the
687 // opcode is complete we can't advance because there are no more opcodes
    // much like trying to deopt at a poll return. In that case we simply
689 // get out of here
690 //
691 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
692 // this will do the right thing even if an exception is pending.
693 goto handle_return;
694 }
695 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
696 if (THREAD->has_pending_exception()) goto handle_exception;
697 goto run;
698 }
699 case got_monitors: {
700 // continue locking now that we have a monitor to use
701 // we expect to find newly allocated monitor at the "top" of the monitor stack.
702 oop lockee = STACK_OBJECT(-1);
703 VERIFY_OOP(lockee);
    // dereferencing lockee ought to provoke an implicit null check
705 // find a free monitor
706 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
707 assert(entry->obj() == nullptr, "Frame manager didn't allocate the monitor");
708 entry->set_obj(lockee);
709 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
710 UPDATE_PC_AND_TOS(1, -1);
711 goto run;
712 }
713 default: {
714 fatal("Unexpected message from frame manager");
715 }
716 }
717
718 run:
719
720 DO_UPDATE_INSTRUCTION_COUNT(*pc)
721 DEBUGGER_SINGLE_STEP_NOTIFY();
722 #ifdef PREFETCH_OPCCODE
723 opcode = *pc; /* prefetch first opcode */
724 #endif
725
726 #ifndef USELABELS
727 while (1)
728 #endif
729 {
730 #ifndef PREFETCH_OPCCODE
731 opcode = *pc;
732 #endif
733 // Seems like this happens twice per opcode. At worst this is only
734 // need at entry to the loop.
735 // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
737 * when returning from transition frames.
738 */
739 opcode_switch:
740 assert(istate == orig, "Corrupted istate");
741 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
742 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
743 assert(topOfStack < istate->stack_base(), "Stack underrun");
744
745 #ifdef USELABELS
746 DISPATCH(opcode);
747 #else
748 switch (opcode)
749 #endif
750 {
751 CASE(_nop):
752 UPDATE_PC_AND_CONTINUE(1);
753
754 /* Push miscellaneous constants onto the stack. */
755
756 CASE(_aconst_null):
757 SET_STACK_OBJECT(nullptr, 0);
758 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
759
760 #undef OPC_CONST_n
761 #define OPC_CONST_n(opcode, const_type, value) \
762 CASE(opcode): \
763 SET_STACK_ ## const_type(value, 0); \
764 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
765
766 OPC_CONST_n(_iconst_m1, INT, -1);
767 OPC_CONST_n(_iconst_0, INT, 0);
768 OPC_CONST_n(_iconst_1, INT, 1);
769 OPC_CONST_n(_iconst_2, INT, 2);
770 OPC_CONST_n(_iconst_3, INT, 3);
771 OPC_CONST_n(_iconst_4, INT, 4);
772 OPC_CONST_n(_iconst_5, INT, 5);
773 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
774 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
775 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
776
777 #undef OPC_CONST2_n
778 #define OPC_CONST2_n(opcname, value, key, kind) \
779 CASE(_##opcname): \
780 { \
781 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
782 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
783 }
784 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
785 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
786 OPC_CONST2_n(lconst_0, Zero, long, LONG);
787 OPC_CONST2_n(lconst_1, One, long, LONG);
788
789 /* Load constant from constant pool: */
790
791 /* Push a 1-byte signed integer value onto the stack. */
792 CASE(_bipush):
793 SET_STACK_INT((jbyte)(pc[1]), 0);
794 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
795
796 /* Push a 2-byte signed integer constant onto the stack. */
797 CASE(_sipush):
798 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
799 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
800
801 /* load from local variable */
802
803 CASE(_aload):
804 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
805 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
806 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
807
808 CASE(_iload):
809 {
810 if (REWRITE_BYTECODES) {
811 // Attempt to rewrite iload, iload -> fast_iload2
812 // iload, caload -> fast_icaload
813 // Normal iloads will be rewritten to fast_iload to avoid checking again.
814 switch (*(pc + 2)) {
815 case Bytecodes::_fast_iload:
816 REWRITE_AT_PC(Bytecodes::_fast_iload2);
817 break;
818 case Bytecodes::_caload:
819 REWRITE_AT_PC(Bytecodes::_fast_icaload);
820 break;
821 case Bytecodes::_iload:
822 // Wait until rewritten to _fast_iload.
823 break;
824 default:
825 // Last iload in a (potential) series, don't check again.
826 REWRITE_AT_PC(Bytecodes::_fast_iload);
827 }
828 }
829 // Normal iload handling.
830 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
831 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
832 }
833
834 CASE(_nofast_iload):
835 {
836 // Normal, non-rewritable iload handling.
837 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
838 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
839 }
840
841 CASE(_fast_iload):
842 CASE(_fload):
843 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
844 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
845
846 CASE(_fast_iload2):
847 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
848 SET_STACK_SLOT(LOCALS_SLOT(pc[3]), 1);
849 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
850
851 CASE(_lload):
852 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
853 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
854
855 CASE(_dload):
856 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
857 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
858
859 #undef OPC_LOAD_n
860 #define OPC_LOAD_n(num) \
861 CASE(_iload_##num): \
862 CASE(_fload_##num): \
863 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
864 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
865 \
866 CASE(_lload_##num): \
867 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
868 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
869 CASE(_dload_##num): \
870 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
871 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
872
873 OPC_LOAD_n(0);
874 OPC_LOAD_n(1);
875 OPC_LOAD_n(2);
876 OPC_LOAD_n(3);
877
878 #undef OPC_ALOAD_n
879 #define OPC_ALOAD_n(num) \
880 CASE(_aload_##num): { \
881 oop obj = LOCALS_OBJECT(num); \
882 VERIFY_OOP(obj); \
883 SET_STACK_OBJECT(obj, 0); \
884 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
885 }
886
      CASE(_aload_0):
      {
        /* Maybe rewrite if following bytecode is one of the supported _fast_Xgetfield bytecodes. */
        // Peeks at the NEXT bytecode: aload_0 immediately followed by a
        // rewritten fast getfield can be fused into a single _fast_Xaccess_0
        // bytecode. If the follower is a not-yet-rewritten getfield we leave
        // this aload_0 alone for now, so the fusion can still happen on a
        // later pass once the getfield has been quickened.
        if (REWRITE_BYTECODES) {
          switch (*(pc + 1)) {
            case Bytecodes::_fast_agetfield:
              REWRITE_AT_PC(Bytecodes::_fast_aaccess_0);
              break;
            case Bytecodes::_fast_fgetfield:
              REWRITE_AT_PC(Bytecodes::_fast_faccess_0);
              break;
            case Bytecodes::_fast_igetfield:
              REWRITE_AT_PC(Bytecodes::_fast_iaccess_0);
              break;
            case Bytecodes::_getfield:
            case Bytecodes::_nofast_getfield: {
              /* Otherwise, do nothing here, wait until/if it gets rewritten to _fast_Xgetfield.
               * Unfortunately, this punishes volatile field access, because it never gets
               * rewritten. */
              break;
            }
            default:
              // Follower is not a field access at all: rewrite plain aload_0
              // to _fast_aload_0 so we skip this peek next time around.
              REWRITE_AT_PC(Bytecodes::_fast_aload_0);
              break;
          }
        }
        // Normal aload_0 handling.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
918
      CASE(_nofast_aload_0):
      {
        // Normal, non-rewritable aload_0 handling.
        // Verify and push local 0; this variant is never rewritten/fused.
        VERIFY_OOP(LOCALS_OBJECT(0));
        SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      // aload_1 .. aload_3 come from the OPC_ALOAD_n macro; aload_0 is
      // special-cased above because it participates in rewriting.
      OPC_ALOAD_n(1);
      OPC_ALOAD_n(2);
      OPC_ALOAD_n(3);
930
931 /* store to a local variable */
932
      CASE(_astore):
        // Pop one slot into local pc[1] via the astore() helper (shared with
        // the astore_<n> cases below).
        astore(topOfStack, -1, locals, pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
        // Pop one untyped slot into local pc[1].
        SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
        // Pop a two-slot long into locals pc[1]/pc[1]+1.
        SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
        // Pop a two-slot double into locals pc[1]/pc[1]+1.
        SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
        UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
949
950 CASE(_wide): {
951 uint16_t reg = Bytes::get_Java_u2(pc + 2);
952
953 opcode = pc[1];
954
955 // Wide and it's sub-bytecode are counted as separate instructions. If we
956 // don't account for this here, the bytecode trace skips the next bytecode.
957 DO_UPDATE_INSTRUCTION_COUNT(opcode);
958
959 switch(opcode) {
960 case Bytecodes::_aload:
961 VERIFY_OOP(LOCALS_OBJECT(reg));
962 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
963 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
964
965 case Bytecodes::_iload:
966 case Bytecodes::_fload:
967 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
968 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
969
970 case Bytecodes::_lload:
971 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
972 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
973
974 case Bytecodes::_dload:
975 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
976 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
977
978 case Bytecodes::_astore:
979 astore(topOfStack, -1, locals, reg);
980 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
981
982 case Bytecodes::_istore:
983 case Bytecodes::_fstore:
984 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
985 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
986
987 case Bytecodes::_lstore:
988 SET_LOCALS_LONG(STACK_LONG(-1), reg);
989 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
990
991 case Bytecodes::_dstore:
992 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
993 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
994
995 case Bytecodes::_iinc: {
996 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
997 // Be nice to see what this generates.... QQQ
998 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
999 UPDATE_PC_AND_CONTINUE(6);
1000 }
1001 case Bytecodes::_ret:
1002 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1003 UPDATE_PC_AND_CONTINUE(0);
1004 default:
1005 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
1006 }
1007 }
1008
1009
      /* astore_<n>/istore_<n>/fstore_<n>: pop one slot into local <n>.
       * Comments are kept outside the macro bodies so line-splicing cannot
       * swallow a continuation. */
#undef OPC_STORE_n
#define OPC_STORE_n(num)                                            \
      CASE(_astore_##num):                                          \
          astore(topOfStack, -1, locals, num);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                    \
      CASE(_istore_##num):                                          \
      CASE(_fstore_##num):                                          \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

      /* dstore_<n>/lstore_<n>: pop a two-slot value into locals <n>/<n>+1. */
#undef OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                           \
      CASE(_dstore_##num):                                          \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                 \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                    \
      CASE(_lstore_##num):                                          \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);
1038
1039 /* stack pop, dup, and insert opcodes */
1040
1041
      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      // The dup/swap family delegates the actual slot shuffling to small
      // helpers (dup, dup2, dup_x1, ...); each case only adjusts pc/tos.
      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }
1078
1079 /* Perform various binary integer operations */
1080
      /* Binary int/long arithmetic. 'test' is nonzero only for div/rem, in
       * which case a zero divisor raises ArithmeticException before the op.
       * Int form: operands at -2/-1, result replaces -2, stack shrinks by 1.
       * Long form: first long at (-1,-2), second at (-3,-4); result lands
       * at -3 and the stack shrinks by 2 slots. */
#undef OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                       \
      CASE(_i##opcname):                                            \
          if (test && (STACK_INT(-1) == 0)) {                       \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero");                           \
          }                                                         \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                \
                                      STACK_INT(-1)),               \
                                      -2);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                    \
      CASE(_l##opcname):                                            \
      {                                                             \
          if (test) {                                               \
            jlong l1 = STACK_LONG(-1);                              \
            if (VMlongEqz(l1)) {                                    \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero");                      \
            }                                                       \
          }                                                         \
          /* First long at (-1,-2) next long at (-3,-4) */          \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),             \
                                        STACK_LONG(-1)),            \
                                        -3);                        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                    \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);
1116
1117
1118 /* Perform various binary floating number operations */
1119 /* On some machine/platforms/compilers div zero check can be implicit */
1120
      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */
      /* No explicit zero check is needed: IEEE-754 float/double division by
       * zero yields infinity/NaN rather than trapping. Doubles occupy two
       * slots (operands at -3 and -1); floats one (operands at -2 and -1). */
#undef OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                           \
      CASE(_d##opcname): {                                          \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),       \
                                            STACK_DOUBLE(-1)),      \
                                            -3);                    \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                    \
      }                                                             \
      CASE(_f##opcname):                                            \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),          \
                                          STACK_FLOAT(-1)),         \
                                          -2);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);
1141
1142 /* Shift operations
1143 * Shift left int and long: ishl, lshl
1144 * Logical shift right int and long w/zero extension: iushr, lushr
1145 * Arithmetic shift right int and long w/sign extension: ishr, lshr
1146 */
1147
      /* Shifts. The shift count is always a one-slot int at -1; for the long
       * forms the value being shifted is the two-slot long below it, so the
       * stack shrinks by one slot in both cases. */
#undef OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                           \
      CASE(_i##opcname):                                            \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                 \
                                     STACK_INT(-1)),                \
                                     -2);                           \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                     \
      CASE(_l##opcname):                                            \
      {                                                             \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),              \
                                       STACK_INT(-1)),              \
                                       -2);                         \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                     \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);
1166
1167 /* Increment local variable by constant */
      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // pc[1] = local index, pc[2] = signed 8-bit increment.
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }
1174
1175 /* negate the value on the top of the stack */
1176
      /* negate the value on the top of the stack */
      // All four forms negate in place: one slot for int/float, two for
      // long/double (hence the -1 offset addresses the whole value).

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }
1196
1197 /* Conversion operations */
1198
      /* Conversion operations */
      // Widening conversions (one slot -> two slots) pop the source, then
      // store the result and grow the stack; narrowing ones do the reverse.
      // The hand-written MORE_STACK pops keep the slot bookkeeping explicit.

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          // SharedRuntime::f2i implements the JVMS NaN/overflow clamping.
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      // i2b/i2c/i2s truncate in place: the value stays a one-slot int that
      // has been sign-extended (byte/short) or zero-extended (char).
      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
1305
1306 /* comparison operators */
1307
1308
      /* Conditional branches. 'skip' is either the signed 16-bit branch
       * offset (taken) or 3, the width of the branch bytecode (not taken).
       * Backward branches go through DO_BACKEDGE_CHECKS, which handles
       * safepoints and OSR-style profiling on the back edge. */
#define COMPARISON_OP(name, comparison)                             \
      CASE(_if_icmp##name): {                                       \
          int skip = (STACK_INT(-2) comparison STACK_INT(-1))       \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;    \
          address branch_pc = pc;                                   \
          UPDATE_PC_AND_TOS(skip, -2);                              \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                      \
          CONTINUE;                                                 \
      }                                                             \
      CASE(_if##name): {                                            \
          int skip = (STACK_INT(-1) comparison 0)                   \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;    \
          address branch_pc = pc;                                   \
          UPDATE_PC_AND_TOS(skip, -1);                              \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                      \
          CONTINUE;                                                 \
      }

      /* eq/ne additionally get the reference-comparison form (if_acmpXX). */
#define COMPARISON_OP2(name, comparison)                            \
      COMPARISON_OP(name, comparison)                               \
      CASE(_if_acmp##name): {                                       \
          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;    \
          address branch_pc = pc;                                   \
          UPDATE_PC_AND_TOS(skip, -2);                              \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                      \
          CONTINUE;                                                 \
      }

      /* ifnonnull: branch when the top reference is not null. */
#define NULL_COMPARISON_NOT_OP(name)                                \
      CASE(_if##name): {                                            \
          int skip = (!(STACK_OBJECT(-1) == nullptr))               \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;    \
          address branch_pc = pc;                                   \
          UPDATE_PC_AND_TOS(skip, -1);                              \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                      \
          CONTINUE;                                                 \
      }

      /* ifnull: branch when the top reference is null. */
#define NULL_COMPARISON_OP(name)                                    \
      CASE(_if##name): {                                            \
          int skip = ((STACK_OBJECT(-1) == nullptr))                \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;    \
          address branch_pc = pc;                                   \
          UPDATE_PC_AND_TOS(skip, -1);                              \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                      \
          CONTINUE;                                                 \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);
1365
1366 /* Goto pc at specified offset in switch table. */
1367
      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          // Operands are 4-byte-aligned after the opcode:
          // lpc[0] = default offset, lpc[1] = low, lpc[2] = high,
          // lpc[3..] = jump offsets for keys low..high.
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          // Single unsigned compare covers both key < low and key > high.
          if (((uint32_t) key > (uint32_t)(high - low))) {
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
1386
1387 /* Goto pc whose table entry matches specified key. */
1388
      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          // Operands are 4-byte-aligned: lpc[0] = default offset,
          // lpc[1] = npairs, then npairs (match, offset) pairs, which are
          // scanned linearly here.
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              break;
            }
          }
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
1406
      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          // Pop two floats, push -1/0/1. The third argument selects the NaN
          // result: -1 for fcmpl, +1 for fcmpg (per the JVMS).
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                        STACK_FLOAT(-1),
                                        (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          // Two doubles = four slots at (-1,-2) and (-3,-4); pop all four,
          // push the one-slot int result. NaN bias as for fcmpl/fcmpg.
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          // Pop two longs (four slots), push the signed comparison result.
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
1435
1436
1437 /* Return from a method */
1438
      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      CASE(_lreturn):
      CASE(_dreturn):
      CASE(_return): {
          // All return flavors share the epilogue at handle_return; the
          // typed result is still on the expression stack at this point.
          // Allow a safepoint before returning to frame manager.
          RETURN_SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {
          // Special return used in Object.<init>: if the receiver's class
          // has a finalizer, register the instance with the runtime before
          // completing construction.
          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
1458
1459 /* Array access byte-codes */
1460
      /* Array access byte-codes */

      /* Null-check the array and bounds-check the index; raises NPE or
       * ArrayIndexOutOfBoundsException (with a formatted message) on
       * failure. The unsigned compare folds index < 0 into one test. */
#define ARRAY_INDEX_CHECK(arrObj, index)                            \
      /* Two integers, the additional message, and the null-terminator */ \
      char message[2 * jintAsStringSize + 33];                      \
      CHECK_NULL(arrObj);                                           \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {          \
          jio_snprintf(message, sizeof(message),                    \
                  "Index %d out of bounds for length %d",           \
                  index, arrObj->length());                         \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message);                                   \
      }

      /* Every array access byte-code starts out like this */
      // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff)                                       \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);           \
      jint     index  = STACK_INT(arrayOff + 1);                    \
      ARRAY_INDEX_CHECK(arrObj, index)

      /* 32-bit loads. These handle conversion from < 32-bit types */
      /* Pops arrayref+index (2 slots), pushes one slot: net -1. */
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)              \
      {                                                             \
          ARRAY_INTRO(-2);                                          \
          (void)extra;                                              \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2);                                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                    \
      }

      /* 64-bit loads */
      /* Pops arrayref+index (2 slots), pushes a 2-slot value: net 0. */
#define ARRAY_LOADTO64(T,T2, stackRes, extra)                       \
      {                                                             \
          ARRAY_INTRO(-2);                                          \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          (void)extra;                                              \
          UPDATE_PC_AND_CONTINUE(1);                                \
      }
1498
      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
      CASE(_aaload): {
          // Reference loads go through obj_at() so the GC barriers /
          // compressed-oop decoding are applied.
          ARRAY_INTRO(-2);
          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR,  jchar, "%d",   STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d",   STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      CASE(_fast_icaload): {
          // Custom fast access for iload,caload pair.
          // The index comes from the local (pc[1]) instead of the stack, so
          // only the arrayref is on the stack; result replaces it in place.
          // Advances 3 bytes: 2 for the fused iload + 1 for caload.
          arrayOop arrObj = (arrayOop) STACK_OBJECT(-1);
          jint index = LOCALS_INT(pc[1]);
          ARRAY_INDEX_CHECK(arrObj, index);
          SET_STACK_INT(*(jchar *)(((address) arrObj->base(T_CHAR)) + index * sizeof(jchar)), -1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 0);
      }
1527
1528 /* 32-bit stores. These handle conversion to < 32-bit types */
1529 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
1530 { \
1531 ARRAY_INTRO(-3); \
1532 (void)extra; \
1533 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1534 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
1535 }
1536
1537 /* 64-bit stores */
1538 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
1539 { \
1540 ARRAY_INTRO(-4); \
1541 (void)extra; \
1542 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1543 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
1544 }
1545
      CASE(_iastore):
          ARRAY_STOREFROM32(T_INT, jint,   "%d",   STACK_INT, 0);
      CASE(_fastore):
          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
      /*
       * This one looks different because of the assignability check
       */
      CASE(_aastore): {
          oop rhsObject = STACK_OBJECT(-1);
          VERIFY_OOP(rhsObject);
          ARRAY_INTRO( -3);
          // arrObj, index are set
          if (rhsObject != nullptr) {
            /* Check assignability of rhsObject into arrObj */
            Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
            Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch
            //
            // Cheap identity test first; full subtype walk only on miss.
            if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
            }
          }
          // obj_at_put applies the GC store barrier for the reference write.
          ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore): {
          // bastore serves both byte[] and boolean[]; the array's klass
          // tells us which, since the bytecode itself cannot.
          ARRAY_INTRO(-3);
          int item = STACK_INT(-1);
          // if it is a T_BOOLEAN array, mask the stored value to 0/1
          if (arrObj->klass() == Universe::boolArrayKlass()) {
            item &= 1;
          } else {
            assert(arrObj->klass() == Universe::byteArrayKlass(),
                   "should be byte array otherwise");
          }
          ((typeArrayOop)arrObj)->byte_at_put(index, item);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_castore):
          ARRAY_STOREFROM32(T_CHAR, jchar,  "%d",   STACK_INT, 0);
      CASE(_sastore):
          ARRAY_STOREFROM32(T_SHORT, jshort, "%d",   STACK_INT, 0);
      CASE(_lastore):
          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_dastore):
          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1594
      CASE(_arraylength):
      {
          // Replace the array reference with its length (NPE on null).
          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
          CHECK_NULL(ary);
          SET_STACK_INT(ary->length(), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }
1602
1603 /* monitorenter and monitorexit for locking/unlocking an object */
1604
      /* monitorenter and monitorexit for locking/unlocking an object */

      CASE(_monitorenter): {
        oop lockee = STACK_OBJECT(-1);
        // derefing's lockee ought to provoke implicit null check
        CHECK_NULL(lockee);
        // find a free monitor or one already allocated for this object
        // if we find a matching object then we need a new monitor
        // since this is recursive enter
        // Scan the frame's monitor block (between stack_base and
        // monitor_base). 'entry' remembers the last free slot seen; hitting
        // a slot that already holds this object forces a fresh slot for the
        // recursive lock, which the scan-with-no-break naturally provides.
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        BasicObjectLock* entry = nullptr;
        while (most_recent != limit ) {
          if (most_recent->obj() == nullptr) entry = most_recent;
          else if (most_recent->obj() == lockee) break;
          most_recent++;
        }
        if (entry != nullptr) {
          entry->set_obj(lockee);
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
        } else {
          // No free slot: ask the frame manager to grow the monitor area,
          // then re-execute this same bytecode (pc is not advanced).
          istate->set_msg(more_monitors);
          UPDATE_PC_AND_RETURN(0); // Re-execute
        }
      }
1629
1630 CASE(_monitorexit): {
1631 oop lockee = STACK_OBJECT(-1);
1632 CHECK_NULL(lockee);
1633 // derefing's lockee ought to provoke implicit null check
1634 // find our monitor slot
1635 BasicObjectLock* limit = istate->monitor_base();
1636 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1637 while (most_recent != limit ) {
1638 if ((most_recent)->obj() == lockee) {
1639 BasicLock* lock = most_recent->lock();
1640 InterpreterRuntime::monitorexit(most_recent);
1641 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1642 }
1643 most_recent++;
1644 }
1645 // Need to throw illegal monitor state exception
1646 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1647 ShouldNotReachHere();
1648 }
1649
1650 /* All of the non-quick opcodes. */
1651
1652 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1653 * constant pool index in the instruction.
1654 */
      /* -Set clobbersCpIndex true if the quickened opcode clobbers the
       * constant pool index in the instruction.
       */
      // getfield/getstatic: resolve the field entry (if needed), then load
      // the value onto the stack. For getfield the receiver at -1 is
      // replaced by the value; for getstatic the mirror is synthesized and
      // the stack simply grows. Volatile loads use acquire accessors.
      CASE(_getfield):
      CASE(_nofast_getfield):
      CASE(_getstatic):
        {
          u2 index;
          index = Bytes::get_native_u2(pc+1);
          ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

          // QQQ Need to make this as inlined as possible. Probably need to
          // split all the bytecode cases out so c++ compiler has a chance
          // for constant prop to fold everything possible away.

          // Interpreter runtime does not expect "nofast" opcodes,
          // prepare the vanilla opcode for it.
          Bytecodes::Code code = (Bytecodes::Code)opcode;
          if (code == Bytecodes::_nofast_getfield) {
            code = Bytecodes::_getfield;
          }

          if (!entry->is_resolved(code)) {
            // Resolution can throw (e.g. NoSuchFieldError) and may GC;
            // re-fetch the entry pointer afterwards.
            CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
                    handle_exception);
            entry = cp->resolved_field_entry_at(index);
          }

          oop obj;
          if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
            // Statics live in the holder class's java mirror.
            Klass* k = entry->field_holder();
            obj = k->java_mirror();
            MORE_STACK(1);  // Assume single slot push
          } else {
            obj = STACK_OBJECT(-1);
            CHECK_NULL(obj);
            // Check if we can rewrite non-volatile _getfield to one of the _fast_Xgetfield.
            if (REWRITE_BYTECODES && !entry->is_volatile() &&
                  ((Bytecodes::Code)opcode != Bytecodes::_nofast_getfield)) {
              // Rewrite current BC to _fast_Xgetfield.
              REWRITE_AT_PC(fast_get_type((TosState)(entry->tos_state())));
            }
          }

          MAYBE_POST_FIELD_ACCESS(obj);

          //
          // Now store the result on the stack
          //
          TosState tos_type = (TosState)(entry->tos_state());
          int field_offset = entry->field_offset();
          if (entry->is_volatile()) {
            if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
              OrderAccess::fence();
            }
            switch (tos_type) {
              case btos:
              case ztos:
                SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
                break;
              case ctos:
                SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
                break;
              case stos:
                SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
                break;
              case itos:
                SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
                break;
              case ftos:
                SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
                break;
              case ltos:
                // Two-slot values overwrite the receiver slot and one more.
                SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
                MORE_STACK(1);
                break;
              case dtos:
                SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
                MORE_STACK(1);
                break;
              case atos: {
                oop val = obj->obj_field_acquire(field_offset);
                VERIFY_OOP(val);
                SET_STACK_OBJECT(val, -1);
                break;
              }
              default:
                ShouldNotReachHere();
            }
          } else {
            switch (tos_type) {
              case btos:
              case ztos:
                SET_STACK_INT(obj->byte_field(field_offset), -1);
                break;
              case ctos:
                SET_STACK_INT(obj->char_field(field_offset), -1);
                break;
              case stos:
                SET_STACK_INT(obj->short_field(field_offset), -1);
                break;
              case itos:
                SET_STACK_INT(obj->int_field(field_offset), -1);
                break;
              case ftos:
                SET_STACK_FLOAT(obj->float_field(field_offset), -1);
                break;
              case ltos:
                SET_STACK_LONG(obj->long_field(field_offset), 0);
                MORE_STACK(1);
                break;
              case dtos:
                SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
                MORE_STACK(1);
                break;
              case atos: {
                oop val = obj->obj_field(field_offset);
                VERIFY_OOP(val);
                SET_STACK_OBJECT(val, -1);
                break;
              }
              default:
                ShouldNotReachHere();
            }
          }

          UPDATE_PC_AND_CONTINUE(3);
        }
1780
      // putfield/putstatic: resolve the field entry (if needed), then pop
      // the value (and, for putfield, the receiver) and store it. 'count'
      // accumulates the total slots to pop: value (1 or 2) plus receiver.
      // Volatile stores use release accessors followed by a storeload fence.
      CASE(_putfield):
      CASE(_nofast_putfield):
      CASE(_putstatic):
        {
          u2 index = Bytes::get_native_u2(pc+1);
          ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

          // Interpreter runtime does not expect "nofast" opcodes,
          // prepare the vanilla opcode for it.
          Bytecodes::Code code = (Bytecodes::Code)opcode;
          if (code == Bytecodes::_nofast_putfield) {
            code = Bytecodes::_putfield;
          }

          if (!entry->is_resolved(code)) {
            // Resolution can throw and may GC; re-fetch the entry pointer.
            CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code),
                    handle_exception);
            entry = cp->resolved_field_entry_at(index);
          }

          // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
          // out so c++ compiler has a chance for constant prop to fold everything possible away.

          oop obj;
          int count;
          TosState tos_type = (TosState)(entry->tos_state());

          count = -1;
          if (tos_type == ltos || tos_type == dtos) {
            --count;            // two-slot value
          }
          if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
            Klass* k = entry->field_holder();
            obj = k->java_mirror();
          } else {
            --count;            // receiver slot
            obj = STACK_OBJECT(count);
            CHECK_NULL(obj);

            // Check if we can rewrite non-volatile _putfield to one of the _fast_Xputfield.
            if (REWRITE_BYTECODES && !entry->is_volatile() &&
                  ((Bytecodes::Code)opcode != Bytecodes::_nofast_putfield)) {
              // Rewrite current BC to _fast_Xputfield.
              REWRITE_AT_PC(fast_put_type((TosState)(entry->tos_state())));
            }
          }

          MAYBE_POST_FIELD_MODIFICATION(obj);

          //
          // Now store the result
          //
          int field_offset = entry->field_offset();
          if (entry->is_volatile()) {
            switch (tos_type) {
              case ztos:
                obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
                break;
              case btos:
                obj->release_byte_field_put(field_offset, STACK_INT(-1));
                break;
              case ctos:
                obj->release_char_field_put(field_offset, STACK_INT(-1));
                break;
              case stos:
                obj->release_short_field_put(field_offset, STACK_INT(-1));
                break;
              case itos:
                obj->release_int_field_put(field_offset, STACK_INT(-1));
                break;
              case ftos:
                obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
                break;
              case ltos:
                obj->release_long_field_put(field_offset, STACK_LONG(-1));
                break;
              case dtos:
                obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
                break;
              case atos: {
                oop val = STACK_OBJECT(-1);
                VERIFY_OOP(val);
                obj->release_obj_field_put(field_offset, val);
                break;
              }
              default:
                ShouldNotReachHere();
            }
            // Volatile store must be visible before any subsequent load.
            OrderAccess::storeload();
          } else {
            switch (tos_type) {
              case ztos:
                obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
                break;
              case btos:
                obj->byte_field_put(field_offset, STACK_INT(-1));
                break;
              case ctos:
                obj->char_field_put(field_offset, STACK_INT(-1));
                break;
              case stos:
                obj->short_field_put(field_offset, STACK_INT(-1));
                break;
              case itos:
                obj->int_field_put(field_offset, STACK_INT(-1));
                break;
              case ftos:
                obj->float_field_put(field_offset, STACK_FLOAT(-1));
                break;
              case ltos:
                obj->long_field_put(field_offset, STACK_LONG(-1));
                break;
              case dtos:
                obj->double_field_put(field_offset, STACK_DOUBLE(-1));
                break;
              case atos: {
                oop val = STACK_OBJECT(-1);
                VERIFY_OOP(val);
                obj->obj_field_put(field_offset, val);
                break;
              }
              default:
                ShouldNotReachHere();
            }
          }

          UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
        }
1909
      CASE(_new): {
        u2 index = Bytes::get_Java_u2(pc+1);

        // Attempt TLAB allocation first.
        //
        // To do this, we need to make sure:
        //   - klass is initialized
        //   - klass can be fastpath allocated (e.g. does not have finalizer)
        //   - TLAB accepts the allocation
        ConstantPool* constants = istate->method()->constants();
        if (UseTLAB && !constants->tag_at(index).is_unresolved_klass()) {
          Klass* entry = constants->resolved_klass_at(index);
          InstanceKlass* ik = InstanceKlass::cast(entry);
          if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
            size_t obj_size = ik->size_helper();
            HeapWord* result = THREAD->tlab().allocate(obj_size);
            if (result != nullptr) {
              // Initialize object field block.
              if (!ZeroTLAB) {
                // The TLAB was not pre-zeroed, we need to clear the memory here.
                size_t hdr_size = oopDesc::header_size();
                Copy::fill_to_words(result + hdr_size, obj_size - hdr_size, 0);
              }

              // Initialize header, mirrors MemAllocator.
              if (UseCompactObjectHeaders) {
                // Compact headers: the klass is encoded in the mark word.
                oopDesc::release_set_mark(result, ik->prototype_header());
              } else {
                oopDesc::set_mark(result, markWord::prototype());
                if (oopDesc::has_klass_gap()) {
                  oopDesc::set_klass_gap(result, 0);
                }
                // Release-store the klass last: it publishes the object as
                // fully header-initialized to concurrent observers.
                oopDesc::release_set_klass(result, ik);
              }
              oop obj = cast_to_oop(result);

              // Must prevent reordering of stores for object initialization
              // with stores that publish the new object.
              OrderAccess::storestore();
              SET_STACK_OBJECT(obj, 0);
              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
            }
          }
        }
        // Slow case allocation: unresolved/uninitialized klass, finalizable
        // class, or TLAB refusal — let the runtime do the full path.
        CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
      }
      CASE(_anewarray): {
        // anewarray: allocate a 1-D array of the reference type at CP index
        // (bytes 1-2); the length is taken from the top of the stack.
        u2 index = Bytes::get_Java_u2(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Overwrite the length slot with the new array oop (no net TOS change).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_CONTINUE(3);
      }
      CASE(_multianewarray): {
        // multianewarray: byte 3 of the instruction holds the dimension count;
        // the 'dims' dimension sizes are the topmost stack elements.
        jint dims = *(pc+3);
        jint size = STACK_INT(-1);
        // stack grows down, dimensions are up!
        jint *dimarray =
                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
                                      Interpreter::stackElementWords-1];
        //adjust pointer to start of stack element
        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Pop the 'dims' sizes and push the array: net TOS change is -(dims-1).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -dims);
        THREAD->set_vm_result_oop(nullptr);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
      }
      CASE(_checkcast):
        // checkcast: null always passes; otherwise the object's klass must be
        // a subtype of the CP klass or ClassCastException is raised.
        if (STACK_OBJECT(-1) != nullptr) {
          VERIFY_OOP(STACK_OBJECT(-1));
          u2 index = Bytes::get_Java_u2(pc+1);
          // Constant pool may have actual klass or unresolved klass. If it is
          // unresolved we must resolve it.
          if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
            CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
          }
          Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
          Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
          //
          // Check for compatibility. This check must not GC!!
          // Seems way more expensive now that we must dispatch.
          //
          if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
            // Build the detail message before raising the Java error.
            ResourceMark rm(THREAD);
            char* message = SharedRuntime::generate_class_cast_message(
              objKlass, klassOf);
            VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
          }
        }
        UPDATE_PC_AND_CONTINUE(3);
2016
      CASE(_instanceof):
        // instanceof: replace the object on TOS with 1 if it is a non-null
        // instance of the CP klass, else 0 (null yields 0).
        if (STACK_OBJECT(-1) == nullptr) {
          SET_STACK_INT(0, -1);
        } else {
          VERIFY_OOP(STACK_OBJECT(-1));
          u2 index = Bytes::get_Java_u2(pc+1);
          // Constant pool may have actual klass or unresolved klass. If it is
          // unresolved we must resolve it.
          if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
            CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
          }
          Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index);
          Klass* objKlass = STACK_OBJECT(-1)->klass();
          //
          // Check for compatibility. This check must not GC!!
          // Seems way more expensive now that we must dispatch.
          //
          if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
            SET_STACK_INT(1, -1);
          } else {
            SET_STACK_INT(0, -1);
          }
        }
        UPDATE_PC_AND_CONTINUE(3);
2041
      CASE(_ldc_w):
      CASE(_ldc):
      {
        // ldc / ldc_w: push a single-slot constant (int, float, String,
        // Class mirror, or dynamically-computed constant) from the CP.
        u2 index;
        bool wide = false;
        int incr = 2; // frequent case
        if (opcode == Bytecodes::_ldc) {
          index = pc[1];            // 1-byte CP index
        } else {
          index = Bytes::get_Java_u2(pc+1);  // 2-byte CP index
          incr = 3;
          wide = true;
        }

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {
        case JVM_CONSTANT_Integer:
          SET_STACK_INT(constants->int_at(index), 0);
          break;

        case JVM_CONSTANT_Float:
          SET_STACK_FLOAT(constants->float_at(index), 0);
          break;

        case JVM_CONSTANT_String:
          {
            oop result = constants->resolved_reference_at(index);
            if (result == nullptr) {
              // Not interned/resolved yet; the runtime resolves and returns it
              // in vm_result_oop.
              CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
              SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
              THREAD->set_vm_result_oop(nullptr);
            } else {
              VERIFY_OOP(result);
              SET_STACK_OBJECT(result, 0);
            }
            break;
          }

        case JVM_CONSTANT_Class:
          // Already-resolved class constant: push its java.lang.Class mirror.
          VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
          SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
          break;

        case JVM_CONSTANT_UnresolvedClass:
        case JVM_CONSTANT_UnresolvedClassInError:
          CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
          SET_STACK_OBJECT(THREAD->vm_result_oop(), 0);
          THREAD->set_vm_result_oop(nullptr);
          break;

        case JVM_CONSTANT_Dynamic:
        case JVM_CONSTANT_DynamicInError:
          {
            // Condy: the runtime returns the computed constant boxed; unbox it
            // onto the stack according to its primitive type.
            CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
            oop result = THREAD->vm_result_oop();
            VERIFY_OOP(result);

            jvalue value;
            BasicType type = java_lang_boxing_object::get_value(result, &value);
            switch (type) {
            case T_FLOAT:   SET_STACK_FLOAT(value.f, 0); break;
            case T_INT:     SET_STACK_INT(value.i, 0); break;
            case T_SHORT:   SET_STACK_INT(value.s, 0); break;
            case T_BYTE:    SET_STACK_INT(value.b, 0); break;
            case T_CHAR:    SET_STACK_INT(value.c, 0); break;
            case T_BOOLEAN: SET_STACK_INT(value.z, 0); break;
            default:  ShouldNotReachHere();
            }

            break;
          }

        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
2118
      CASE(_ldc2_w):
      {
        // ldc2_w: push a two-slot constant (long, double, or a two-slot
        // dynamically-computed constant) from the CP.
        u2 index = Bytes::get_Java_u2(pc+1);

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {

        case JVM_CONSTANT_Long:
           SET_STACK_LONG(constants->long_at(index), 1);
          break;

        case JVM_CONSTANT_Double:
          SET_STACK_DOUBLE(constants->double_at(index), 1);
          break;

        case JVM_CONSTANT_Dynamic:
        case JVM_CONSTANT_DynamicInError:
          {
            // Condy: runtime returns the boxed constant; unbox long/double.
            CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
            oop result = THREAD->vm_result_oop();
            VERIFY_OOP(result);

            jvalue value;
            BasicType type = java_lang_boxing_object::get_value(result, &value);
            switch (type) {
            case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break;
            case T_LONG:   SET_STACK_LONG(value.j, 1); break;
            default: ShouldNotReachHere();
            }

            break;
          }

        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
      }
2156
      CASE(_fast_aldc_w):
      CASE(_fast_aldc): {
        // Quickened ldc of a reference constant (e.g. MethodHandle/MethodType):
        // the index is a native-order resolved-references index.
        u2 index;
        int incr;
        if (opcode == Bytecodes::_fast_aldc) {
          index = pc[1];
          incr = 2;
        } else {
          index = Bytes::get_native_u2(pc+1);
          incr = 3;
        }

        // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
        // This kind of CP cache entry does not need to match the flags byte, because
        // there is a 1-1 relation between bytecode type and CP entry type.
        ConstantPool* constants = METHOD->constants();
        oop result = constants->resolved_reference_at(index);
        if (result == nullptr) {
          CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
                  handle_exception);
          result = THREAD->vm_result_oop();
        }
        // A resolved null constant is stored as the sentinel; map it back to null.
        if (result == Universe::the_null_sentinel())
          result = nullptr;

        VERIFY_OOP(result);
        SET_STACK_OBJECT(result, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
2186
      CASE(_invokedynamic): {
        // invokedynamic: 4-byte native-order index into the resolved-indy table.
        u4 index = Bytes::get_native_u4(pc+1);
        ResolvedIndyEntry* indy_info = cp->resolved_indy_entry_at(index);
        if (!indy_info->is_resolved()) {
          // First execution: link the call site via the runtime.
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          indy_info = cp->resolved_indy_entry_at(index); // get resolved entry
        }
        Method* method = indy_info->method();
        if (VerifyOops) method->verify();

        if (indy_info->has_appendix()) {
          // Push the appendix (extra trailing argument produced by linkage).
          constantPoolHandle cp(THREAD, METHOD->constants());
          SET_STACK_OBJECT(cp->resolved_reference_from_indy(index), 0);
          MORE_STACK(1);
        }

        // Hand the call off to the frame manager; bcp advances past the
        // 5-byte instruction (opcode + 4-byte index) on return.
        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(5);

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
2211
      CASE(_invokehandle): {
        // invokehandle: MethodHandle.invoke/invokeExact intrinsic dispatch.

        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);

        if (! entry->is_resolved((Bytecodes::Code) opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          entry = cp->resolved_method_entry_at(index);
        }

        Method* method = entry->method();
        if (VerifyOops) method->verify();

        if (entry->has_appendix()) {
          // Push the appendix argument attached by method-handle linkage.
          constantPoolHandle cp(THREAD, METHOD->constants());
          SET_STACK_OBJECT(cp->cache()->appendix_if_resolved(entry), 0);
          MORE_STACK(1);
        }

        // Hand off to the frame manager; 3-byte instruction.
        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(3);

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
2239
      CASE(_invokeinterface): {
        u2 index = Bytes::get_native_u2(pc+1);

        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
        if (!entry->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
        }

        istate->set_msg(call_method);

        // Special case of invokeinterface called for virtual method of
        // java.lang.Object. See cpCache.cpp for details.
        Method* callee = nullptr;
        if (entry->is_forced_virtual()) {
          // Receiver is the argument 'number_of_parameters' slots below TOS.
          CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
          if (entry->is_vfinal()) {
            callee = entry->method();
          } else {
            // Get receiver.
            int parms = entry->number_of_parameters();
            // Same comments as invokevirtual apply here.
            oop rcvr = STACK_OBJECT(-parms);
            VERIFY_OOP(rcvr);
            Klass* rcvrKlass = rcvr->klass();
            callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
          }
        } else if (entry->is_vfinal()) {
          // private interface method invocations
          //
          // Ensure receiver class actually implements
          // the resolved interface class. The link resolver
          // does this, but only for the first time this
          // interface is being called.
          int parms = entry->number_of_parameters();
          oop rcvr = STACK_OBJECT(-parms);
          CHECK_NULL(rcvr);
          Klass* recv_klass = rcvr->klass();
          Klass* resolved_klass = entry->interface_klass();
          if (!recv_klass->is_subtype_of(resolved_klass)) {
            ResourceMark rm(THREAD);
            char buf[200];
            jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
              recv_klass->external_name(),
              resolved_klass->external_name());
            VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
          }
          callee = entry->method();
        }
        if (callee != nullptr) {
          // Fast exits above (forced-virtual or vfinal) found the callee;
          // dispatch without an itable lookup.
          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
          if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
          istate->set_bcp_advance(5);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }

        // this could definitely be cleaned up QQQ
        Method *interface_method = entry->method();
        InstanceKlass* iclass = interface_method->method_holder();

        // get receiver
        int parms = entry->number_of_parameters();
        oop rcvr = STACK_OBJECT(-parms);
        CHECK_NULL(rcvr);
        InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();

        // Receiver subtype check against resolved interface klass (REFC).
        {
          Klass* refc = entry->interface_klass();
          itableOffsetEntry* scan;
          for (scan = (itableOffsetEntry*) int2->start_of_itable();
               scan->interface_klass() != nullptr;
               scan++) {
            if (scan->interface_klass() == refc) {
              break;
            }
          }
          // Check that the entry is non-null.  A null entry means
          // that the receiver class doesn't implement the
          // interface, and wasn't the same as when the caller was
          // compiled.
          if (scan->interface_klass() == nullptr) {
            VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
          }
        }

        // Find the declaring interface (DECC) in the receiver's itable to
        // locate its method block.
        itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
        int i;
        for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
          if (ki->interface_klass() == iclass) break;
        }
        // If the interface isn't found, this class doesn't implement this
        // interface. The link resolver checks this but only for the first
        // time this interface is called.
        if (i == int2->itable_length()) {
          CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(THREAD, rcvr->klass(), iclass),
                  handle_exception);
        }
        int mindex = interface_method->itable_index();

        itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
        callee = im[mindex].method();
        if (callee == nullptr) {
          // Abstract (unimplemented) itable slot.
          CALL_VM(InterpreterRuntime::throw_AbstractMethodErrorVerbose(THREAD, rcvr->klass(), interface_method),
                  handle_exception);
        }

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
        if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
        istate->set_bcp_advance(5);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
2361
      CASE(_invokevirtual):
      CASE(_invokespecial):
      CASE(_invokestatic): {
        u2 index = Bytes::get_native_u2(pc+1);

        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        if (!entry->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          entry = cp->resolved_method_entry_at(index);
        }

        istate->set_msg(call_method);
        {
          Method* callee;
          if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
            // Null-check the receiver before any dispatch decision.
            CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
            if (entry->is_vfinal()) {
              callee = entry->method();
              if (REWRITE_BYTECODES && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_archive()) {
                // Rewrite to _fast_invokevfinal.
                REWRITE_AT_PC(Bytecodes::_fast_invokevfinal);
              }
            } else {
              // get receiver
              int parms = entry->number_of_parameters();
              // this works but needs a resourcemark and seems to create a vtable on every call:
              // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
              //
              // this fails with an assert
              // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
              // but this works
              oop rcvr = STACK_OBJECT(-parms);
              VERIFY_OOP(rcvr);
              Klass* rcvrKlass = rcvr->klass();
              /*
                Executing this code in java.lang.String:
                    public String(char value[]) {
                          this.count = value.length;
                          this.value = (char[])value.clone();
                     }

                 a find on rcvr->klass() reports:
                 {type array char}{type array class}
                  - klass: {other class}

                  but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
                  because rcvr->klass()->is_instance_klass() == 0
                  However it seems to have a vtable in the right location. Huh?
                  Because vtables have the same offset for ArrayKlass and InstanceKlass.
              */
              callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
            }
          } else {
            if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
              // invokespecial has a receiver; invokestatic does not.
              CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
            }
            callee = entry->method();
          }

          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
          if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
          istate->set_bcp_advance(3);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }
      }
2434
      /* Allocate memory for a new java object. */

      CASE(_newarray): {
        // newarray: allocate a primitive array; byte 1 encodes the element
        // BasicType, length comes from the top of the stack.
        BasicType atype = (BasicType) *(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        // Overwrite the length slot with the new array oop (no net TOS change).
        SET_STACK_OBJECT(THREAD->vm_result_oop(), -1);
        THREAD->set_vm_result_oop(nullptr);

        UPDATE_PC_AND_CONTINUE(2);
      }
2450
      /* Throw an exception. */

      CASE(_athrow): {
        // athrow: pop the throwable, record it as the pending exception and
        // route through the common exception-handling path below.
        oop except_oop = STACK_OBJECT(-1);
        CHECK_NULL(except_oop);
        // set pending_exception so we use common code
        THREAD->set_pending_exception(except_oop, nullptr, 0);
        goto handle_exception;
      }
2460
      /* goto and jsr. They are exactly the same except jsr pushes
       * the address of the next instruction first.
       */

      CASE(_jsr): {
        /* push bytecode index on stack */
        // +3 = length of jsr, so the pushed value is the bci of the
        // instruction following it (consumed later by astore/ret).
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto):
      {
        int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        // Backedge bookkeeping (see DO_BACKEDGE_CHECKS macro) before resuming.
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }

      CASE(_jsr_w): {
        /* push return address on the stack */
        // +5 = length of jsr_w (opcode + 4-byte offset).
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
      }

      CASE(_goto_w):
      {
        int32_t offset = Bytes::get_Java_u4(pc + 1);
        address branch_pc = pc;
        UPDATE_PC(offset);
        DO_BACKEDGE_CHECKS(offset, branch_pc);
        CONTINUE;
      }
2496
      /* return from a jsr or jsr_w */

      CASE(_ret): {
        // ret: the local slot named by byte 1 holds the bytecode offset that
        // jsr/jsr_w pushed; resume execution at code_base() + that offset.
        pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
        UPDATE_PC_AND_CONTINUE(0);
      }
2503
      /* debugger breakpoint */

      CASE(_breakpoint): {
        // JVMTI breakpoint: fetch the original bytecode this breakpoint
        // replaced, post the breakpoint event, then execute the original.
        Bytecodes::Code original_bytecode;
        DECACHE_STATE();
        SET_LAST_JAVA_FRAME();
        original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
                                                                         METHOD, pc);
        RESET_LAST_JAVA_FRAME();
        CACHE_STATE();
        if (THREAD->has_pending_exception()) goto handle_exception;
        CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
                                                handle_exception);

        // Re-dispatch on the original opcode without advancing pc.
        opcode = (jubyte)original_bytecode;
        goto opcode_switch;
      }
2521
      // Quickened getfield variants: the operand is a native-order index into
      // the resolved-field table, so no resolution check is needed. Each pops
      // the receiver, null-checks it, optionally posts a JVMTI field-access
      // event, and pushes the field value (two slots for long/double).

      CASE(_fast_agetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        VERIFY_OOP(obj->obj_field(field_offset));
        SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_bgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->byte_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_cgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->char_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_dgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        // Double occupies two slots: overwrite the receiver slot and grow by one.
        SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
        MORE_STACK(1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_fgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_FLOAT(obj->float_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_igetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->int_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_lgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        // Long occupies two slots: overwrite the receiver slot and grow by one.
        SET_STACK_LONG(obj->long_field(field_offset), 0);
        MORE_STACK(1);
        UPDATE_PC_AND_CONTINUE(3);
      }

      CASE(_fast_sgetfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = STACK_OBJECT(-1);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->short_field(field_offset), -1);
        UPDATE_PC_AND_CONTINUE(3);
      }
2636
      // Quickened putfield variants. The receiver sits below the value: at
      // TOS-2 for one-slot values and TOS-3 for long/double. Each null-checks
      // the receiver, optionally posts a JVMTI field-modification event,
      // stores the value, and pops both value and receiver.

      CASE(_fast_aputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->obj_field_put(field_offset, STACK_OBJECT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_bputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->byte_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_zputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        // Booleans are normalized to 0/1 on store.
        obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_cputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->char_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_dputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        // Two-slot value: receiver is at TOS-3.
        oop obj = STACK_OBJECT(-3);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->double_field_put(field_offset, STACK_DOUBLE(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
      }

      CASE(_fast_fputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->float_field_put(field_offset, STACK_FLOAT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_iputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->int_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }

      CASE(_fast_lputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        // Two-slot value: receiver is at TOS-3.
        oop obj = STACK_OBJECT(-3);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->long_field_put(field_offset, STACK_LONG(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3);
      }

      CASE(_fast_sputfield): {
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);

        oop obj = STACK_OBJECT(-2);
        CHECK_NULL(obj);

        MAYBE_POST_FIELD_MODIFICATION(obj);

        int field_offset = entry->field_offset();
        obj->short_field_put(field_offset, STACK_INT(-1));

        UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2);
      }
2771
      // Quickened aload_0 and the fused aload_0+getfield forms. The *_access_0
      // bytecodes read their field-cache index from pc+2 (the getfield operand
      // bytes of the original pair) and advance pc by 4.

      CASE(_fast_aload_0): {
        oop obj = LOCALS_OBJECT(0);
        VERIFY_OOP(obj);
        SET_STACK_OBJECT(obj, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_fast_aaccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        VERIFY_OOP(obj->obj_field(field_offset));
        SET_STACK_OBJECT(obj->obj_field(field_offset), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }

      CASE(_fast_iaccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_INT(obj->int_field(field_offset), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }

      CASE(_fast_faccess_0): {
        u2 index = Bytes::get_native_u2(pc+2);
        ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index);
        int field_offset = entry->field_offset();

        oop obj = LOCALS_OBJECT(0);
        CHECK_NULL(obj);
        VERIFY_OOP(obj);

        MAYBE_POST_FIELD_ACCESS(obj);

        SET_STACK_FLOAT(obj->float_field(field_offset), 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
      }
2824
      CASE(_fast_invokevfinal): {
        // Quickened invokevirtual of a vfinal method: the callee is fixed, so
        // skip vtable dispatch entirely (see the rewrite in _invokevirtual).
        u2 index = Bytes::get_native_u2(pc+1);
        ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);

        assert(entry->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting");

        istate->set_msg(call_method);

        // Null-check the receiver, then hand off to the frame manager.
        CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
        Method* callee = entry->method();
        istate->set_callee(callee);
        if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        } else {
          istate->set_callee_entry_point(callee->from_interpreted_entry());
        }
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0);
      }
2844
      // Any opcode not handled above indicates an interpreter bug or a
      // corrupted bytecode stream: abort with a descriptive message.
      DEFAULT:
          fatal("Unimplemented opcode %d = %s", opcode,
                Bytecodes::name((Bytecodes::Code)opcode));
          goto finish;
2849
2850 } /* switch(opc) */
2851
2852
#ifdef USELABELS
    // With computed-goto dispatch there is no switch bottom to fall out of,
    // so handlers that may have set a pending exception jump here explicitly.
    check_for_exception:
#endif
    {
      if (!THREAD->has_pending_exception()) {
        CONTINUE;
      }
      /* We will be gcsafe soon, so flush our state. */
      DECACHE_PC();
      goto handle_exception;
    }
    // Target for CONTINUE in the non-USELABELS (switch) dispatch variant.
    do_continue: ;
2865
2866 } /* while (1) interpreter loop */
2867
2868
  // An exception exists in the thread state see whether this activation can handle it
  handle_exception: {

    HandleMarkCleaner __hmc(THREAD);
    Handle except_oop(THREAD, THREAD->pending_exception());
    // Prevent any subsequent HandleMarkCleaner in the VM
    // from freeing the except_oop handle.
    HandleMark __hm(THREAD);

    THREAD->clear_pending_exception();
    assert(except_oop() != nullptr, "No exception to process");
    intptr_t continuation_bci;
    // expression stack is emptied
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
    // Ask the runtime for a handler bci in this method; a negative result
    // means no handler here (or the exception was replaced, e.g. by a
    // StackOverflowError during the lookup — the new one is in vm_result_oop).
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);

    except_oop = Handle(THREAD, THREAD->vm_result_oop());
    THREAD->set_vm_result_oop(nullptr);
    if (continuation_bci >= 0) {
      // Place exception on top of stack
      SET_STACK_OBJECT(except_oop(), 0);
      MORE_STACK(1);
      pc = METHOD->code_base() + continuation_bci;
      if (log_is_enabled(Info, exceptions)) {
        ResourceMark rm(THREAD);
        stringStream tempst;
        tempst.print("interpreter method <%s>\n"
                     " at bci %d, continuing at %d for thread " INTPTR_FORMAT,
                     METHOD->print_value_string(),
                     (int)(istate->bcp() - METHOD->code_base()),
                     (int)continuation_bci, p2i(THREAD));
        Exceptions::log_exception(except_oop, tempst.as_string());
      }
      // for AbortVMOnException flag
      Exceptions::debug_check_abort(except_oop);
      // Resume interpreting at the handler.
      goto run;
    }
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
             " at bci %d, unwinding for thread " INTPTR_FORMAT,
             METHOD->print_value_string(),
             (int)(istate->bcp() - METHOD->code_base()),
             p2i(THREAD));
      Exceptions::log_exception(except_oop, tempst.as_string());
    }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(except_oop);

    // No handler in this activation, unwind and try again
    THREAD->set_pending_exception(except_oop(), nullptr, 0);
    goto handle_return;
  } // handle_exception:
2924
2925 // Return from an interpreter invocation with the result of the interpretation
2926 // on the top of the Java Stack (or a pending exception)
2927
2928 handle_Pop_Frame: {
2929
2930 // We don't really do anything special here except we must be aware
2931 // that we can get here without ever locking the method (if sync).
2932 // Also we skip the notification of the exit.
2933
2934 istate->set_msg(popping_frame);
2935 // Clear pending so while the pop is in process
2936 // we don't start another one if a call_vm is done.
2937 THREAD->clear_popframe_condition();
  // Let interpreter (only) see that we're in the process of popping a frame
2939 THREAD->set_pop_frame_in_process();
2940
2941 goto handle_return;
2942
2943 } // handle_Pop_Frame
2944
2945 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2946 // given by the invoker of the early return.
2947 handle_Early_Return: {
2948
2949 istate->set_msg(early_return);
2950
2951 // Clear expression stack.
2952 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2953
2954 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
2955
2956 // Push the value to be returned.
2957 switch (istate->method()->result_type()) {
2958 case T_BOOLEAN:
2959 case T_SHORT:
2960 case T_BYTE:
2961 case T_CHAR:
2962 case T_INT:
2963 SET_STACK_INT(ts->earlyret_value().i, 0);
2964 MORE_STACK(1);
2965 break;
2966 case T_LONG:
2967 SET_STACK_LONG(ts->earlyret_value().j, 1);
2968 MORE_STACK(2);
2969 break;
2970 case T_FLOAT:
2971 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
2972 MORE_STACK(1);
2973 break;
2974 case T_DOUBLE:
2975 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2976 MORE_STACK(2);
2977 break;
2978 case T_ARRAY:
2979 case T_OBJECT:
2980 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2981 MORE_STACK(1);
2982 break;
2983 default:
2984 ShouldNotReachHere();
2985 }
2986
2987 ts->clr_earlyret_value();
2988 ts->set_earlyret_oop(nullptr);
2989 ts->clr_earlyret_pending();
2990
2991 // Fall through to handle_return.
2992
2993 } // handle_Early_Return
2994
2995 handle_return: {
2996 // A storestore barrier is required to order initialization of
2997 // final fields with publishing the reference to the object that
2998 // holds the field. Without the barrier the value of final fields
2999 // can be observed to change.
3000 OrderAccess::storestore();
3001
3002 DECACHE_STATE();
3003
3004 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
3005 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
3006 Handle original_exception(THREAD, THREAD->pending_exception());
3007 Handle illegal_state_oop(THREAD, nullptr);
3008
3009 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
3010 // in any following VM entries from freeing our live handles, but illegal_state_oop
3011 // isn't really allocated yet and so doesn't become live until later and
3012 // in unpredictable places. Instead we must protect the places where we enter the
3013 // VM. It would be much simpler (and safer) if we could allocate a real handle with
  // a null oop in it and then overwrite the oop later as needed. Unfortunately,
  // this isn't possible.
3016
3017 if (THREAD->has_pending_exception()) {
3018 THREAD->clear_pending_exception();
3019 }
3020
3021 //
3022 // As far as we are concerned we have returned. If we have a pending exception
3023 // that will be returned as this invocation's result. However if we get any
3024 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
3025 // will be our final result (i.e. monitor exception trumps a pending exception).
3026 //
3027
3028 // If we never locked the method (or really passed the point where we would have),
3029 // there is no need to unlock it (or look for other monitors), since that
3030 // could not have happened.
3031
3032 if (THREAD->do_not_unlock_if_synchronized()) {
3033
3034 // Never locked, reset the flag now because obviously any caller must
3035 // have passed their point of locking for us to have gotten here.
3036
3037 THREAD->set_do_not_unlock_if_synchronized(false);
3038 } else {
3039 // At this point we consider that we have returned. We now check that the
3040 // locks were properly block structured. If we find that they were not
3041 // used properly we will return with an illegal monitor exception.
3042 // The exception is checked by the caller not the callee since this
3043 // checking is considered to be part of the invocation and therefore
3044 // in the callers scope (JVM spec 8.13).
3045 //
3046 // Another weird thing to watch for is if the method was locked
3047 // recursively and then not exited properly. This means we must
3048 // examine all the entries in reverse time(and stack) order and
3049 // unlock as we find them. If we find the method monitor before
3050 // we are at the initial entry then we should throw an exception.
3051 // It is not clear the template based interpreter does this
3052 // correctly
3053
3054 BasicObjectLock* base = istate->monitor_base();
3055 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3056 bool method_unlock_needed = METHOD->is_synchronized();
    // We know the initial monitor was used for the method, so don't check
    // that slot in the loop.
3059 if (method_unlock_needed) base--;
3060
3061 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
3062 while (end < base) {
3063 oop lockee = end->obj();
3064 if (lockee != nullptr) {
3065 InterpreterRuntime::monitorexit(end);
3066
3067 // One error is plenty
3068 if (illegal_state_oop() == nullptr && !suppress_error) {
3069 {
3070 // Prevent any HandleMarkCleaner from freeing our live handles
3071 HandleMark __hm(THREAD);
3072 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3073 }
3074 assert(THREAD->has_pending_exception(), "Lost our exception!");
3075 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3076 THREAD->clear_pending_exception();
3077 }
3078 }
3079 end++;
3080 }
3081 // Unlock the method if needed
3082 if (method_unlock_needed) {
3083 if (base->obj() == nullptr) {
3084 // The method is already unlocked this is not good.
3085 if (illegal_state_oop() == nullptr && !suppress_error) {
3086 {
3087 // Prevent any HandleMarkCleaner from freeing our live handles
3088 HandleMark __hm(THREAD);
3089 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3090 }
3091 assert(THREAD->has_pending_exception(), "Lost our exception!");
3092 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3093 THREAD->clear_pending_exception();
3094 }
3095 } else {
3096 //
3097 // The initial monitor is always used for the method
3098 // However if that slot is no longer the oop for the method it was unlocked
3099 // and reused by something that wasn't unlocked!
3100 //
3101 // deopt can come in with rcvr dead because c2 knows
3102 // its value is preserved in the monitor. So we can't use locals[0] at all
3103 // and must use first monitor slot.
3104 //
3105 oop rcvr = base->obj();
3106 if (rcvr == nullptr) {
3107 if (!suppress_error) {
3108 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
3109 illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3110 THREAD->clear_pending_exception();
3111 }
3112 } else {
3113 InterpreterRuntime::monitorexit(base);
3114 if (THREAD->has_pending_exception()) {
3115 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
3116 THREAD->clear_pending_exception();
3117 }
3118 }
3119 }
3120 }
3121 }
3122 // Clear the do_not_unlock flag now.
3123 THREAD->set_do_not_unlock_if_synchronized(false);
3124
3125 //
3126 // Notify jvmti/jvmdi
3127 //
3128 // NOTE: we do not notify a method_exit if we have a pending exception,
3129 // including an exception we generate for unlocking checks. In the former
3130 // case, JVMDI has already been notified by our call for the exception handler
3131 // and in both cases as far as JVMDI is concerned we have already returned.
3132 // If we notify it again JVMDI will be all confused about how many frames
3133 // are still on the stack (4340444).
3134 //
3135 // NOTE Further! It turns out the JVMTI spec in fact expects to see
3136 // method_exit events whenever we leave an activation unless it was done
3137 // for popframe. This is nothing like jvmdi. However we are passing the
3138 // tests at the moment (apparently because they are jvmdi based) so rather
3139 // than change this code and possibly fail tests we will leave it alone
3140 // (with this note) in anticipation of changing the vm and the tests
3141 // simultaneously.
3142
3143 suppress_exit_event = suppress_exit_event || illegal_state_oop() != nullptr;
3144
3145 // Whenever JVMTI puts a thread in interp_only_mode, method
3146 // entry/exit events are sent for that thread to track stack depth.
3147
3148 if (JVMTI_ENABLED && !suppress_exit_event && THREAD->is_interp_only_mode()) {
3149 // Prevent any HandleMarkCleaner from freeing our live handles
3150 HandleMark __hm(THREAD);
3151 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3152 }
3153
3154 //
3155 // See if we are returning any exception
3156 // A pending exception that was pending prior to a possible popping frame
3157 // overrides the popping frame.
3158 //
3159 assert(!suppress_error || (suppress_error && illegal_state_oop() == nullptr), "Error was not suppressed");
3160 if (illegal_state_oop() != nullptr || original_exception() != nullptr) {
3161 // Inform the frame manager we have no result.
3162 istate->set_msg(throwing_exception);
3163 if (illegal_state_oop() != nullptr)
3164 THREAD->set_pending_exception(illegal_state_oop(), nullptr, 0);
3165 else
3166 THREAD->set_pending_exception(original_exception(), nullptr, 0);
3167 UPDATE_PC_AND_RETURN(0);
3168 }
3169
3170 if (istate->msg() == popping_frame) {
3171 // Make it simpler on the assembly code and set the message for the frame pop.
3172 // returns
3173 if (istate->prev() == nullptr) {
3174 // We must be returning to a deoptimized frame (because popframe only happens between
3175 // two interpreted frames). We need to save the current arguments in C heap so that
3176 // the deoptimized frame when it restarts can copy the arguments to its expression
3177 // stack and re-execute the call. We also have to notify deoptimization that this
3178 // has occurred and to pick the preserved args copy them to the deoptimized frame's
3179 // java expression stack. Yuck.
3180 //
3181 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3182 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3183 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3184 }
3185 } else {
3186 istate->set_msg(return_from_method);
3187 }
3188
3189 // Normal return
3190 // Advance the pc and return to frame manager
3191 UPDATE_PC_AND_RETURN(1);
3192 } /* handle_return: */
3193
3194 // This is really a fatal error return
3195
3196 finish:
3197 DECACHE_TOS();
3198 DECACHE_PC();
3199
3200 return;
3201 }
3202
3203 // This constructor should only be used to construct the object to signal
3204 // interpreter initialization. All other instances should be created by
3205 // the frame manager.
3206 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3207 if (msg != initialize) ShouldNotReachHere();
3208 _msg = msg;
3209 _self_link = this;
3210 _prev_link = nullptr;
3211 }
3212
3213 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
3214 intptr_t* locals, int locals_offset) {
3215 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3216 locals[Interpreter::local_index_at(-locals_offset)] = value;
3217 }
3218
3219 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3220 int to_offset) {
3221 tos[Interpreter::expr_index_at(-to_offset)] =
3222 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3223 }
3224
3225 void BytecodeInterpreter::dup(intptr_t *tos) {
3226 copy_stack_slot(tos, -1, 0);
3227 }
3228
3229 void BytecodeInterpreter::dup2(intptr_t *tos) {
3230 copy_stack_slot(tos, -2, 0);
3231 copy_stack_slot(tos, -1, 1);
3232 }
3233
3234 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
3235 /* insert top word two down */
3236 copy_stack_slot(tos, -1, 0);
3237 copy_stack_slot(tos, -2, -1);
3238 copy_stack_slot(tos, 0, -2);
3239 }
3240
3241 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
3242 /* insert top word three down */
3243 copy_stack_slot(tos, -1, 0);
3244 copy_stack_slot(tos, -2, -1);
3245 copy_stack_slot(tos, -3, -2);
3246 copy_stack_slot(tos, 0, -3);
3247 }
3248 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
3249 /* insert top 2 slots three down */
3250 copy_stack_slot(tos, -1, 1);
3251 copy_stack_slot(tos, -2, 0);
3252 copy_stack_slot(tos, -3, -1);
3253 copy_stack_slot(tos, 1, -2);
3254 copy_stack_slot(tos, 0, -3);
3255 }
3256 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
3257 /* insert top 2 slots four down */
3258 copy_stack_slot(tos, -1, 1);
3259 copy_stack_slot(tos, -2, 0);
3260 copy_stack_slot(tos, -3, -1);
3261 copy_stack_slot(tos, -4, -2);
3262 copy_stack_slot(tos, 1, -3);
3263 copy_stack_slot(tos, 0, -4);
3264 }
3265
3266
3267 void BytecodeInterpreter::swap(intptr_t *tos) {
3268 // swap top two elements
3269 intptr_t val = tos[Interpreter::expr_index_at(1)];
3270 // Copy -2 entry to -1
3271 copy_stack_slot(tos, -2, -1);
3272 // Store saved -1 entry into -2
3273 tos[Interpreter::expr_index_at(2)] = val;
3274 }
3275 // --------------------------------------------------------------------------------
3276 // Non-product code
3277 #ifndef PRODUCT
3278
3279 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3280 switch (msg) {
3281 case BytecodeInterpreter::no_request: return("no_request");
3282 case BytecodeInterpreter::initialize: return("initialize");
3283 // status message to C++ interpreter
3284 case BytecodeInterpreter::method_entry: return("method_entry");
3285 case BytecodeInterpreter::method_resume: return("method_resume");
3286 case BytecodeInterpreter::got_monitors: return("got_monitors");
3287 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
3288 // requests to frame manager from C++ interpreter
3289 case BytecodeInterpreter::call_method: return("call_method");
3290 case BytecodeInterpreter::return_from_method: return("return_from_method");
3291 case BytecodeInterpreter::more_monitors: return("more_monitors");
3292 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
3293 case BytecodeInterpreter::popping_frame: return("popping_frame");
3294 case BytecodeInterpreter::do_osr: return("do_osr");
3295 // deopt
3296 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
3297 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
3298 default: return("BAD MSG");
3299 }
3300 }
3301 void
3302 BytecodeInterpreter::print() {
3303 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
3304 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
3305 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
3306 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
3307 {
3308 ResourceMark rm;
3309 char *method_name = _method->name_and_sig_as_C_string();
3310 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
3311 }
3312 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
3313 tty->print_cr("msg: %s", C_msg(this->_msg));
3314 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
3315 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
3316 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
3317 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
3318 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
3319 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
3320 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
3321 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
3322 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
3323 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
3324 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
3325 }
3326
3327 extern "C" {
3328 void PI(uintptr_t arg) {
3329 ((BytecodeInterpreter*)arg)->print();
3330 }
3331 }
3332 #endif // PRODUCT