1 /*
2 * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interp_masm_riscv.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "logging/log.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/method.hpp"
37 #include "oops/methodData.hpp"
38 #include "oops/resolvedFieldEntry.hpp"
39 #include "oops/resolvedIndyEntry.hpp"
40 #include "oops/resolvedMethodEntry.hpp"
41 #include "prims/jvmtiExport.hpp"
42 #include "prims/jvmtiThreadState.hpp"
43 #include "runtime/basicLock.hpp"
44 #include "runtime/frame.inline.hpp"
45 #include "runtime/javaThread.hpp"
46 #include "runtime/safepointMechanism.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "utilities/powerOfTwo.hpp"
49
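// Narrow the 64-bit 'result' according to the method's declared return type:
// booleans are masked to one bit, bytes and shorts are sign-extended, chars are
// zero-extended; in all cases the value is finally sign-extended from 32 bits.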
50 void InterpreterMacroAssembler::narrow(Register result) {
51 // Get method->_constMethod->_result_type
52 ld(t0, Address(fp, frame::interpreter_frame_method_offset * wordSize));
53 ld(t0, Address(t0, Method::const_offset()));
54 lbu(t0, Address(t0, ConstMethod::result_type_offset()));
55
56 Label done, notBool, notByte, notChar;
57
58 // common case first
59 mv(t1, T_INT);
60 beq(t0, t1, done);
61
62 // mask integer result to narrower return type.
63 mv(t1, T_BOOLEAN);
64 bne(t0, t1, notBool);
65
66 andi(result, result, 0x1);
67 j(done);
68
69 bind(notBool);
70 mv(t1, T_BYTE);
71 bne(t0, t1, notByte);
72 sext(result, result, 8);
73 j(done);
74
75 bind(notByte);
76 mv(t1, T_CHAR);
77 bne(t0, t1, notChar);
78 zext(result, result, 16);
79 j(done);
80
81 bind(notChar);
82 sext(result, result, 16);
83
84 bind(done);
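// Common tail: sign-extend the (possibly narrowed) 32-bit value to 64 bits.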
85 sext(result, result, 32);
86 }
87
88 void InterpreterMacroAssembler::jump_to_entry(address entry) {
89 assert(entry != nullptr, "Entry must have been generated by now");
90 j(entry);
91 }
92
93 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
94 if (JvmtiExport::can_pop_frame()) {
95 Label L;
96 // Initiate popframe handling only if it is not already being
97 // processed. If the flag has the popframe_processing bit set,
98 // it means that this code is called *during* popframe handling - we
99 // don't want to reenter.
100 // This method is only called just after the call into the vm in
101 // call_VM_base, so the arg registers are available.
102 lwu(t1, Address(xthread, JavaThread::popframe_condition_offset()));
103 test_bit(t0, t1, exact_log2(JavaThread::popframe_pending_bit));
104 beqz(t0, L);
105 test_bit(t0, t1, exact_log2(JavaThread::popframe_processing_bit));
106 bnez(t0, L);
107 // Call Interpreter::remove_activation_preserving_args_entry() to get the
108 // address of the same-named entrypoint in the generated interpreter code.
109 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
110 jr(x10);
111 bind(L);
112 }
113 }
114
115
116 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
117 ld(x12, Address(xthread, JavaThread::jvmti_thread_state_offset()));
118 const Address tos_addr(x12, JvmtiThreadState::earlyret_tos_offset());
119 const Address oop_addr(x12, JvmtiThreadState::earlyret_oop_offset());
120 const Address val_addr(x12, JvmtiThreadState::earlyret_value_offset());
121 switch (state) {
122 case atos:
123 ld(x10, oop_addr);
124 sd(zr, oop_addr);
125 verify_oop(x10);
126 break;
127 case ltos:
128 ld(x10, val_addr);
129 break;
130 case btos: // fall through
131 case ztos: // fall through
132 case ctos: // fall through
133 case stos: // fall through
134 case itos:
135 lwu(x10, val_addr);
136 break;
137 case ftos:
138 flw(f10, val_addr);
139 break;
140 case dtos:
141 fld(f10, val_addr);
142 break;
143 case vtos:
144 /* nothing to do */
145 break;
146 default:
147 ShouldNotReachHere();
148 }
149 // Clean up tos value in the thread object
150 mv(t0, (int)ilgl);
151 sw(t0, tos_addr);
152 sw(zr, val_addr);
153 }
154
155
156 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
157 if (JvmtiExport::can_force_early_return()) {
158 Label L;
159 ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
160 beqz(t0, L); // if thread->jvmti_thread_state() is null then exit
161
162 // Initiate earlyret handling only if it is not already being processed.
163 // If the flag has the earlyret_processing bit set, it means that this code
164 // is called *during* earlyret handling - we don't want to reenter.
165 lwu(t0, Address(t0, JvmtiThreadState::earlyret_state_offset()));
166 mv(t1, JvmtiThreadState::earlyret_pending);
167 bne(t0, t1, L);
168
169 // Call Interpreter::remove_activation_early_entry() to get the address of the
170 // same-named entrypoint in the generated interpreter code.
171 ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
172 lwu(t0, Address(t0, JvmtiThreadState::earlyret_tos_offset()));
173 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), t0);
174 jr(x10);
175 bind(L);
176 }
177 }
178
179 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
180 assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
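// Bytecode operands are stored big-endian: combine the two bytes as (b0 << 8) | b1.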
181 lbu(t1, Address(xbcp, bcp_offset));
182 lbu(reg, Address(xbcp, bcp_offset + 1));
183 slli(t1, t1, 8);
184 add(reg, reg, t1);
185 }
186
187 void InterpreterMacroAssembler::get_dispatch() {
188 la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));
189 }
190
191 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
192 Register tmp,
193 int bcp_offset,
194 size_t index_size) {
195 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
196 if (index_size == sizeof(u2)) {
197 load_short_misaligned(index, Address(xbcp, bcp_offset), tmp, false);
198 } else if (index_size == sizeof(u4)) {
199 load_int_misaligned(index, Address(xbcp, bcp_offset), tmp, false);
200 } else if (index_size == sizeof(u1)) {
201 load_unsigned_byte(index, Address(xbcp, bcp_offset));
202 } else {
203 ShouldNotReachHere();
204 }
205 }
206
207 // Load object from cpool->resolved_references(index)
208 void InterpreterMacroAssembler::load_resolved_reference_at_index(
209 Register result, Register index, Register tmp) {
210 assert_different_registers(result, index);
211
212 get_constant_pool(result);
213 // Load pointer for resolved_references[] objArray
214 ld(result, Address(result, ConstantPool::cache_offset()));
215 ld(result, Address(result, ConstantPoolCache::resolved_references_offset()));
216 resolve_oop_handle(result, tmp, t1);
// Add in the index: account for the objArray header, then scale by the heap-oop size
218 addi(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
219 shadd(result, index, result, index, LogBytesPerHeapOop);
220 load_heap_oop(result, Address(result, 0), tmp, t1);
221 }
222
223 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
224 Register cpool, Register index, Register klass, Register temp) {
225 shadd(temp, index, cpool, temp, LogBytesPerWord);
226 lhu(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
227 ld(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
228 shadd(klass, temp, klass, temp, LogBytesPerWord);
229 ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
230 }
231
232 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
233 // subtype of super_klass.
234 //
235 // Args:
236 // x10: superklass
237 // Rsub_klass: subklass
238 //
239 // Kills:
240 // x12
241 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
242 Label& ok_is_subtype) {
243 assert(Rsub_klass != x10, "x10 holds superklass");
244 assert(Rsub_klass != x12, "x12 holds 2ndary super array length");
245
246 // Profile the not-null value's klass.
247 profile_typecheck(x12, Rsub_klass); // blows x12
248
249 // Do the check.
250 check_klass_subtype(Rsub_klass, x10, x12, ok_is_subtype); // blows x12
251 }
252
253 // Java Expression Stack
254
255 void InterpreterMacroAssembler::pop_ptr(Register r) {
256 ld(r, Address(esp, 0));
257 addi(esp, esp, wordSize);
258 }
259
260 void InterpreterMacroAssembler::pop_i(Register r) {
lw(r, Address(esp, 0)); // lw sign-extends the loaded 32-bit value
262 addi(esp, esp, wordSize);
263 }
264
265 void InterpreterMacroAssembler::pop_l(Register r) {
266 ld(r, Address(esp, 0));
267 addi(esp, esp, 2 * Interpreter::stackElementSize);
268 }
269
270 void InterpreterMacroAssembler::push_ptr(Register r) {
271 subi(esp, esp, wordSize);
272 sd(r, Address(esp, 0));
273 }
274
275 void InterpreterMacroAssembler::push_i(Register r) {
276 subi(esp, esp, wordSize);
277 sext(r, r, 32);
278 sd(r, Address(esp, 0));
279 }
280
281 void InterpreterMacroAssembler::push_l(Register r) {
282 subi(esp, esp, 2 * wordSize);
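// A long occupies two expression stack slots; the unused slot is zeroed.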
283 sd(zr, Address(esp, wordSize));
284 sd(r, Address(esp));
285 }
286
287 void InterpreterMacroAssembler::pop_f(FloatRegister r) {
288 flw(r, Address(esp, 0));
289 addi(esp, esp, wordSize);
290 }
291
292 void InterpreterMacroAssembler::pop_d(FloatRegister r) {
293 fld(r, Address(esp, 0));
294 addi(esp, esp, 2 * Interpreter::stackElementSize);
295 }
296
297 void InterpreterMacroAssembler::push_f(FloatRegister r) {
298 subi(esp, esp, wordSize);
299 fsw(r, Address(esp, 0));
300 }
301
302 void InterpreterMacroAssembler::push_d(FloatRegister r) {
303 subi(esp, esp, 2 * wordSize);
304 fsd(r, Address(esp, 0));
305 }
306
307 void InterpreterMacroAssembler::pop(TosState state) {
308 switch (state) {
309 case atos:
310 pop_ptr();
311 verify_oop(x10);
312 break;
313 case btos: // fall through
314 case ztos: // fall through
315 case ctos: // fall through
316 case stos: // fall through
317 case itos:
318 pop_i();
319 break;
320 case ltos:
321 pop_l();
322 break;
323 case ftos:
324 pop_f();
325 break;
326 case dtos:
327 pop_d();
328 break;
329 case vtos:
330 /* nothing to do */
331 break;
332 default:
333 ShouldNotReachHere();
334 }
335 }
336
337 void InterpreterMacroAssembler::push(TosState state) {
338 switch (state) {
339 case atos:
340 verify_oop(x10);
341 push_ptr();
342 break;
343 case btos: // fall through
344 case ztos: // fall through
345 case ctos: // fall through
346 case stos: // fall through
347 case itos:
348 push_i();
349 break;
350 case ltos:
351 push_l();
352 break;
353 case ftos:
354 push_f();
355 break;
356 case dtos:
357 push_d();
358 break;
359 case vtos:
360 /* nothing to do */
361 break;
362 default:
363 ShouldNotReachHere();
364 }
365 }
366
367 // Helpers for swap and dup
368 void InterpreterMacroAssembler::load_ptr(int n, Register val) {
369 ld(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
370 }
371
372 void InterpreterMacroAssembler::store_ptr(int n, Register val) {
373 sd(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
374 }
375
376 void InterpreterMacroAssembler::load_float(Address src) {
377 flw(f10, src);
378 }
379
380 void InterpreterMacroAssembler::load_double(Address src) {
381 fld(f10, src);
382 }
383
384 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
385 // set sender sp
386 mv(x19_sender_sp, sp);
// record last_sp as a frame-relative offset in stack-element units
388 sub(t0, esp, fp);
389 srai(t0, t0, Interpreter::logStackElementSize);
390 sd(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
391 }
392
// Jump to the from_interpreted entry of a call unless single stepping is possible
// in this thread, in which case we must call the i2i entry.
395 void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
396 prepare_to_jump_from_interpreted();
397 if (JvmtiExport::can_post_interpreter_events()) {
398 Label run_compiled_code;
399 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
400 // compiled code in threads for which the event is enabled. Check here for
401 // interp_only_mode if these events CAN be enabled.
402 lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
403 beqz(t0, run_compiled_code);
404 ld(t1, Address(method, Method::interpreter_entry_offset()));
405 jr(t1);
406 bind(run_compiled_code);
407 }
408
409 ld(t1, Address(method, Method::from_interpreted_offset()));
410 jr(t1);
411 }
412
413 // The following two routines provide a hook so that an implementation
414 // can schedule the dispatch in two parts. amd64 does not do this.
415 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
416 }
417
418 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
419 dispatch_next(state, step);
420 }
421
422 void InterpreterMacroAssembler::dispatch_base(TosState state,
423 address* table,
424 bool verifyoop,
425 bool generate_poll,
426 Register Rs) {
// Note: the argument Rs is expected to be t0 by default.
428 if (VerifyActivationFrameSize) {
429 Label L;
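// The current frame size (fp - esp) must be at least the minimal interpreter frame size.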
430 sub(t1, fp, esp);
431 int min_frame_size =
432 (frame::link_offset - frame::interpreter_frame_initial_sp_offset + frame::metadata_words) * wordSize;
433 sub(t1, t1, min_frame_size);
434 bgez(t1, L);
435 stop("broken stack frame");
436 bind(L);
437 }
438 if (verifyoop && state == atos) {
439 verify_oop(x10);
440 }
441
442 Label safepoint;
443 address* const safepoint_table = Interpreter::safept_table(state);
444 bool needs_thread_local_poll = generate_poll && table != safepoint_table;
445
446 if (needs_thread_local_poll) {
447 NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
448 ld(t1, Address(xthread, JavaThread::polling_word_offset()));
449 test_bit(t1, t1, exact_log2(SafepointMechanism::poll_bit()));
450 bnez(t1, safepoint);
451 }
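// Compute the address of the dispatch-table entry for the bytecode in Rs;
// each entry is a code address, hence the scaling by 8 bytes (shift of 3).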
452 if (table == Interpreter::dispatch_table(state)) {
453 mv(t1, Interpreter::distance_from_dispatch_table(state));
454 add(t1, Rs, t1);
455 shadd(t1, t1, xdispatch, t1, 3);
456 } else {
457 mv(t1, (address)table);
458 shadd(t1, Rs, t1, Rs, 3);
459 }
460 ld(t1, Address(t1));
461 jr(t1);
462
463 if (needs_thread_local_poll) {
464 bind(safepoint);
465 la(t1, ExternalAddress((address)safepoint_table));
466 shadd(t1, Rs, t1, Rs, 3);
467 ld(t1, Address(t1));
468 jr(t1);
469 }
470 }
471
472 void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll, Register Rs) {
473 dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll, Rs);
474 }
475
476 void InterpreterMacroAssembler::dispatch_only_normal(TosState state, Register Rs) {
477 dispatch_base(state, Interpreter::normal_table(state), true, false, Rs);
478 }
479
480 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state, Register Rs) {
481 dispatch_base(state, Interpreter::normal_table(state), false, false, Rs);
482 }
483
484 void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
485 // load next bytecode
486 load_unsigned_byte(t0, Address(xbcp, step));
487 add(xbcp, xbcp, step);
488 dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
489 }
490
491 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
492 // load current bytecode
493 lbu(t0, Address(xbcp, 0));
494 dispatch_base(state, table);
495 }
496
497 // remove activation
498 //
499 // Unlock the receiver if this is a synchronized method.
500 // Unlock any Java monitors from synchronized blocks.
501 // Apply stack watermark barrier.
502 // Notify JVMTI.
503 // Remove the activation from the stack.
504 //
505 // If there are locked Java monitors
506 // If throw_monitor_exception
507 // throws IllegalMonitorStateException
508 // Else if install_monitor_exception
509 // installs IllegalMonitorStateException
510 // Else
511 // no error processing
512 void InterpreterMacroAssembler::remove_activation(TosState state,
513 bool throw_monitor_exception,
514 bool install_monitor_exception,
515 bool notify_jvmdi) {
// Note: Register x13 may be in use for the
// result check if this is a synchronized method.
518 Label unlocked, unlock, no_unlock;
519
520 #ifdef ASSERT
521 Label not_preempted;
522 ld(t0, Address(xthread, JavaThread::preempt_alternate_return_offset()));
523 beqz(t0, not_preempted);
524 stop("remove_activation: should not have alternate return address set");
525 bind(not_preempted);
526 #endif /* ASSERT */
527
528 // get the value of _do_not_unlock_if_synchronized into x13
529 const Address do_not_unlock_if_synchronized(xthread,
530 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
531 lbu(x13, do_not_unlock_if_synchronized);
532 sb(zr, do_not_unlock_if_synchronized); // reset the flag
533
534 // get method access flags
535 ld(x11, Address(fp, frame::interpreter_frame_method_offset * wordSize));
536 load_unsigned_short(x12, Address(x11, Method::access_flags_offset()));
537 test_bit(t0, x12, exact_log2(JVM_ACC_SYNCHRONIZED));
538 beqz(t0, unlocked);
539
540 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
541 // is set.
542 bnez(x13, no_unlock);
543
544 // unlock monitor
545 push(state); // save result
546
// The BasicObjectLock will be the first one in the list, since this is a
// synchronized method. However, we need to check that the object has
// not been unlocked by an explicit monitorexit bytecode.
550 const Address monitor(fp, frame::interpreter_frame_initial_sp_offset *
551 wordSize - (int) sizeof(BasicObjectLock));
552 // We use c_rarg1 so that if we go slow path it will be the correct
553 // register for unlock_object to pass to VM directly
554 la(c_rarg1, monitor); // address of first monitor
555
556 ld(x10, Address(c_rarg1, BasicObjectLock::obj_offset()));
557 bnez(x10, unlock);
558
559 pop(state);
560 if (throw_monitor_exception) {
561 // Entry already unlocked, need to throw exception
562 call_VM(noreg, CAST_FROM_FN_PTR(address,
563 InterpreterRuntime::throw_illegal_monitor_state_exception));
564 should_not_reach_here();
565 } else {
566 // Monitor already unlocked during a stack unroll. If requested,
567 // install an illegal_monitor_state_exception. Continue with
568 // stack unrolling.
569 if (install_monitor_exception) {
570 call_VM(noreg, CAST_FROM_FN_PTR(address,
571 InterpreterRuntime::new_illegal_monitor_state_exception));
572 }
573 j(unlocked);
574 }
575
576 bind(unlock);
577 unlock_object(c_rarg1);
578 pop(state);
579
// Check for block-structured locking (i.e., that all locked
// objects have been unlocked).
582 bind(unlocked);
583
584 // x10: Might contain return value
585
586 // Check that all monitors are unlocked
587 {
588 Label loop, exception, entry, restart;
589 const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
590 const Address monitor_block_top(
591 fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
592 const Address monitor_block_bot(
593 fp, frame::interpreter_frame_initial_sp_offset * wordSize);
594
595 bind(restart);
596 // We use c_rarg1 so that if we go slow path it will be the correct
597 // register for unlock_object to pass to VM directly
598 ld(c_rarg1, monitor_block_top); // derelativize pointer
599 shadd(c_rarg1, c_rarg1, fp, c_rarg1, LogBytesPerWord);
600 // c_rarg1 points to current entry, starting with top-most entry
601
602 la(x9, monitor_block_bot); // points to word before bottom of
603 // monitor block
604
605 j(entry);
606
607 // Entry already locked, need to throw exception
608 bind(exception);
609
610 if (throw_monitor_exception) {
611 // Throw exception
612 MacroAssembler::call_VM(noreg,
613 CAST_FROM_FN_PTR(address, InterpreterRuntime::
614 throw_illegal_monitor_state_exception));
615
616 should_not_reach_here();
617 } else {
618 // Stack unrolling. Unlock object and install illegal_monitor_exception.
619 // Unlock does not block, so don't have to worry about the frame.
620 // We don't have to preserve c_rarg1 since we are going to throw an exception.
621
622 push(state);
623 unlock_object(c_rarg1);
624 pop(state);
625
626 if (install_monitor_exception) {
627 call_VM(noreg, CAST_FROM_FN_PTR(address,
628 InterpreterRuntime::
629 new_illegal_monitor_state_exception));
630 }
631
632 j(restart);
633 }
634
635 bind(loop);
636 // check if current entry is used
637 add(t0, c_rarg1, in_bytes(BasicObjectLock::obj_offset()));
638 ld(t0, Address(t0, 0));
639 bnez(t0, exception);
640
641 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
642 bind(entry);
bne(c_rarg1, x9, loop); // check if bottom reached; if not at bottom, check this entry
644 }
645
646 bind(no_unlock);
647
648 JFR_ONLY(enter_jfr_critical_section();)
649
// The poll below is for the stack watermark barrier. It allows frames that would
// normally not be safe to use to be fixed up lazily. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
653 Label slow_path;
654 Label fast_path;
655 safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
656 j(fast_path);
657
658 bind(slow_path);
659 push(state);
660 set_last_Java_frame(esp, fp, pc(), t0);
661 super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
662 reset_last_Java_frame(true);
663 pop(state);
664 bind(fast_path);
665
666 // JVMTI support. Make sure the safepoint poll test is issued prior.
667 if (notify_jvmdi) {
668 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
669 } else {
670 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
671 }
672
673 // remove activation
674 // get sender esp
675 ld(t1,
676 Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
677 if (StackReservedPages > 0) {
678 // testing if reserved zone needs to be re-enabled
679 Label no_reserved_zone_enabling;
680
681 // check if already enabled - if so no re-enabling needed
682 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
683 lw(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
684 subw(t0, t0, StackOverflow::stack_guard_enabled);
685 beqz(t0, no_reserved_zone_enabling);
686
687 // look for an overflow into the stack reserved zone, i.e.
688 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
689 ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset()));
690 ble(t1, t0, no_reserved_zone_enabling);
691
692 JFR_ONLY(leave_jfr_critical_section();)
693
694 call_VM_leaf(
695 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), xthread);
696 call_VM(noreg, CAST_FROM_FN_PTR(address,
697 InterpreterRuntime::throw_delayed_StackOverflowError));
698 should_not_reach_here();
699
700 bind(no_reserved_zone_enabling);
701 }
702
703 // remove frame anchor
704 leave();
705
706 JFR_ONLY(leave_jfr_critical_section();)
707
708 // restore sender esp
709 mv(esp, t1);
710
711 // If we're returning to interpreted code we will shortly be
712 // adjusting SP to allow some space for ESP. If we're returning to
713 // compiled code the saved sender SP was saved in sender_sp, so this
714 // restores it.
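// Align sp down to a 16-byte boundary (the ABI-required stack alignment).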
715 andi(sp, esp, -16);
716 }
717
718 #if INCLUDE_JFR
719 void InterpreterMacroAssembler::enter_jfr_critical_section() {
720 const Address sampling_critical_section(xthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
721 mv(t0, true);
722 sb(t0, sampling_critical_section);
723 }
724
725 void InterpreterMacroAssembler::leave_jfr_critical_section() {
726 const Address sampling_critical_section(xthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
727 sb(zr, sampling_critical_section);
728 }
729 #endif // INCLUDE_JFR
730
731 // Lock object
732 //
733 // Args:
734 // c_rarg1: BasicObjectLock to be used for locking
735 //
736 // Kills:
737 // x10
738 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, .. (param regs)
739 // t0, t1 (temp regs)
740 void InterpreterMacroAssembler::lock_object(Register lock_reg)
741 {
742 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
743
744 const Register tmp = c_rarg2;
745 const Register obj_reg = c_rarg3; // Will contain the oop
746 const Register tmp2 = c_rarg4;
747 const Register tmp3 = c_rarg5;
748
749 // Load object pointer into obj_reg (c_rarg3)
750 ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
751
752 Label done, slow_case;
753 fast_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
754 j(done);
755
756 bind(slow_case);
757 // Call the runtime routine for slow case
758 call_VM_preemptable(noreg,
759 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
760 lock_reg);
761
762 bind(done);
763 }
764
765
// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if the object is
// not locked by the current thread.
769 //
770 // Args:
771 // c_rarg1: BasicObjectLock for lock
772 //
773 // Kills:
774 // x10
775 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, ... (param regs)
776 // t0, t1 (temp regs)
777 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
778 {
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
780
781 const Register swap_reg = x10;
782 const Register header_reg = c_rarg2; // Will contain the old oopMark
783 const Register obj_reg = c_rarg3; // Will contain the oop
784 const Register tmp_reg = c_rarg4; // Temporary used by fast_unlock
785
786 save_bcp(); // Save in case of exception
787
788 // Load oop into obj_reg (c_rarg3)
789 ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
790
791 // Free entry
792 sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
793
794 Label done, slow_case;
795 fast_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
796 j(done);
797
798 bind(slow_case);
799 // Call the runtime routine for slow case.
800 sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
801 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
802
803 bind(done);
804 restore_bcp();
805 }
806
807
808 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
809 Label& zero_continue) {
810 assert(ProfileInterpreter, "must be profiling interpreter");
811 ld(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
812 beqz(mdp, zero_continue);
813 }
814
815 // Set the method data pointer for the current bcp.
816 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
817 assert(ProfileInterpreter, "must be profiling interpreter");
818 Label set_mdp;
819 push_reg(RegSet::of(x10, x11), sp); // save x10, x11
820
821 // Test MDO to avoid the call if it is null.
822 ld(x10, Address(xmethod, in_bytes(Method::method_data_offset())));
823 beqz(x10, set_mdp);
824 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp);
825 // x10: mdi
// mdo is guaranteed to be non-zero here; we checked for it before the call.
827 ld(x11, Address(xmethod, in_bytes(Method::method_data_offset())));
828 la(x11, Address(x11, in_bytes(MethodData::data_offset())));
829 add(x10, x11, x10);
830 sd(x10, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
831 bind(set_mdp);
832 pop_reg(RegSet::of(x10, x11), sp);
833 }
834
835 void InterpreterMacroAssembler::verify_method_data_pointer() {
836 assert(ProfileInterpreter, "must be profiling interpreter");
837 #ifdef ASSERT
838 Label verify_continue;
839 subi(sp, sp, 4 * wordSize);
840 sd(x10, Address(sp, 0));
841 sd(x11, Address(sp, wordSize));
842 sd(x12, Address(sp, 2 * wordSize));
843 sd(x13, Address(sp, 3 * wordSize));
844 test_method_data_pointer(x13, verify_continue); // If mdp is zero, continue
845 get_method(x11);
846
847 // If the mdp is valid, it will point to a DataLayout header which is
848 // consistent with the bcp. The converse is highly probable also.
849 lh(x12, Address(x13, in_bytes(DataLayout::bci_offset())));
850 ld(t0, Address(x11, Method::const_offset()));
851 add(x12, x12, t0);
852 la(x12, Address(x12, ConstMethod::codes_offset()));
853 beq(x12, xbcp, verify_continue);
// x11: method
855 // xbcp: bcp // xbcp == 22
856 // x13: mdp
857 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
858 x11, xbcp, x13);
859 bind(verify_continue);
860 ld(x10, Address(sp, 0));
861 ld(x11, Address(sp, wordSize));
862 ld(x12, Address(sp, 2 * wordSize));
863 ld(x13, Address(sp, 3 * wordSize));
864 addi(sp, sp, 4 * wordSize);
865 #endif // ASSERT
866 }
867
868
869 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
870 int constant,
871 Register value) {
872 assert(ProfileInterpreter, "must be profiling interpreter");
873 Address data(mdp_in, constant);
874 sd(value, data);
875 }
876
877
878 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
879 int constant) {
880 increment_mdp_data_at(mdp_in, noreg, constant);
881 }
882
883 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
884 Register index,
885 int constant) {
886 assert(ProfileInterpreter, "must be profiling interpreter");
887
888 assert_different_registers(t1, t0, mdp_in, index);
889
890 Address addr1(mdp_in, constant);
891 Address addr2(t1, 0);
892 Address &addr = addr1;
893 if (index != noreg) {
894 la(t1, addr1);
895 add(t1, t1, index);
896 addr = addr2;
897 }
898
899 ld(t0, addr);
900 addi(t0, t0, DataLayout::counter_increment);
901 sd(t0, addr);
902 }
903
904 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
905 int flag_byte_constant) {
906 assert(ProfileInterpreter, "must be profiling interpreter");
907 int flags_offset = in_bytes(DataLayout::flags_offset());
908 // Set the flag
909 lbu(t1, Address(mdp_in, flags_offset));
910 ori(t1, t1, flag_byte_constant);
911 sb(t1, Address(mdp_in, flags_offset));
912 }
913
914
915 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
916 int offset,
917 Register value,
918 Register test_value_out,
919 Label& not_equal_continue) {
920 assert(ProfileInterpreter, "must be profiling interpreter");
921 if (test_value_out == noreg) {
922 ld(t1, Address(mdp_in, offset));
923 bne(value, t1, not_equal_continue);
924 } else {
925 // Put the test value into a register, so caller can use it:
926 ld(test_value_out, Address(mdp_in, offset));
927 bne(value, test_value_out, not_equal_continue);
928 }
929 }
930
931
932 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
933 int offset_of_disp) {
934 assert(ProfileInterpreter, "must be profiling interpreter");
935 ld(t1, Address(mdp_in, offset_of_disp));
936 add(mdp_in, mdp_in, t1);
937 sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
938 }
939
940 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
941 Register reg,
942 int offset_of_disp) {
943 assert(ProfileInterpreter, "must be profiling interpreter");
944 add(t1, mdp_in, reg);
945 ld(t1, Address(t1, offset_of_disp));
946 add(mdp_in, mdp_in, t1);
947 sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
948 }
949
950
951 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
952 int constant) {
953 assert(ProfileInterpreter, "must be profiling interpreter");
954 add(mdp_in, mdp_in, (unsigned)constant);
955 sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
956 }
957
958
959 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
960 assert(ProfileInterpreter, "must be profiling interpreter");
961
962 // save/restore across call_VM
963 subi(sp, sp, 2 * wordSize);
964 sd(zr, Address(sp, 0));
965 sd(return_bci, Address(sp, wordSize));
966 call_VM(noreg,
967 CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
968 return_bci);
969 ld(zr, Address(sp, 0));
970 ld(return_bci, Address(sp, wordSize));
971 addi(sp, sp, 2 * wordSize);
972 }
973
974 void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
975 if (ProfileInterpreter) {
976 Label profile_continue;
977
978 // If no method data exists, go to profile_continue.
979 test_method_data_pointer(mdp, profile_continue);
980
981 // We are taking a branch. Increment the taken count.
982 increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
983
984 // The method data pointer needs to be updated to reflect the new target.
985 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
986 bind(profile_continue);
987 }
988 }
989
990 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
991 if (ProfileInterpreter) {
992 Label profile_continue;
993
994 // If no method data exists, go to profile_continue.
995 test_method_data_pointer(mdp, profile_continue);
996
997 // We are not taking a branch. Increment the not taken count.
998 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
999
1000 // The method data pointer needs to be updated to correspond to
1001 // the next bytecode
1002 update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1003 bind(profile_continue);
1004 }
1005 }
1006
1007 void InterpreterMacroAssembler::profile_call(Register mdp) {
1008 if (ProfileInterpreter) {
1009 Label profile_continue;
1010
1011 // If no method data exists, go to profile_continue.
1012 test_method_data_pointer(mdp, profile_continue);
1013
1014 // We are making a call. Increment the count.
1015 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1016
1017 // The method data pointer needs to be updated to reflect the new target.
1018 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1019 bind(profile_continue);
1020 }
1021 }
1022
1023 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1024 if (ProfileInterpreter) {
1025 Label profile_continue;
1026
1027 // If no method data exists, go to profile_continue.
1028 test_method_data_pointer(mdp, profile_continue);
1029
1030 // We are making a call. Increment the count.
1031 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1032
1033 // The method data pointer needs to be updated to reflect the new target.
1034 update_mdp_by_constant(mdp,
1035 in_bytes(VirtualCallData::
1036 virtual_call_data_size()));
1037 bind(profile_continue);
1038 }
1039 }
1040
1041
1042 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1043 Register mdp) {
1044 if (ProfileInterpreter) {
1045 Label profile_continue;
1046
1047 // If no method data exists, go to profile_continue.
1048 test_method_data_pointer(mdp, profile_continue);
1049
1050 // Record the receiver type.
1051 profile_receiver_type(receiver, mdp, 0);
1052
1053 // The method data pointer needs to be updated to reflect the new target.
1054
1055 update_mdp_by_constant(mdp,
1056 in_bytes(VirtualCallData::
1057 virtual_call_data_size()));
1058 bind(profile_continue);
1059 }
1060 }
1061
1062 void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
1063 if (ProfileInterpreter) {
1064 Label profile_continue;
1065
1066 // If no method data exists, go to profile_continue.
1067 test_method_data_pointer(mdp, profile_continue);
1068
1069 // Update the total ret count.
1070 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1071
1072 for (uint row = 0; row < RetData::row_limit(); row++) {
1073 Label next_test;
1074
1075 // See if return_bci is equal to bci[n]:
1076 test_mdp_data_at(mdp,
1077 in_bytes(RetData::bci_offset(row)),
1078 return_bci, noreg,
1079 next_test);
1080
1081 // return_bci is equal to bci[n]. Increment the count.
1082 increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1083
1084 // The method data pointer needs to be updated to reflect the new target.
1085 update_mdp_by_offset(mdp,
1086 in_bytes(RetData::bci_displacement_offset(row)));
1087 j(profile_continue);
1088 bind(next_test);
1089 }
1090
1091 update_mdp_for_ret(return_bci);
1092
1093 bind(profile_continue);
1094 }
1095 }
1096
1097 void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
1098 if (ProfileInterpreter) {
1099 Label profile_continue;
1100
1101 // If no method data exists, go to profile_continue.
1102 test_method_data_pointer(mdp, profile_continue);
1103
1104 set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1105
1106 // The method data pointer needs to be updated.
1107 int mdp_delta = in_bytes(BitData::bit_data_size());
1108 if (TypeProfileCasts) {
1109 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1110 }
1111 update_mdp_by_constant(mdp, mdp_delta);
1112
1113 bind(profile_continue);
1114 }
1115 }
1116
1117 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
1118 if (ProfileInterpreter) {
1119 Label profile_continue;
1120
1121 // If no method data exists, go to profile_continue.
1122 test_method_data_pointer(mdp, profile_continue);
1123
1124 // The method data pointer needs to be updated.
1125 int mdp_delta = in_bytes(BitData::bit_data_size());
1126 if (TypeProfileCasts) {
1127 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1128
1129 // Record the object type.
1130 profile_receiver_type(klass, mdp, 0);
1131 }
1132 update_mdp_by_constant(mdp, mdp_delta);
1133
1134 bind(profile_continue);
1135 }
1136 }
1137
1138 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1139 if (ProfileInterpreter) {
1140 Label profile_continue;
1141
1142 // If no method data exists, go to profile_continue.
1143 test_method_data_pointer(mdp, profile_continue);
1144
1145 // Update the default case count
1146 increment_mdp_data_at(mdp,
1147 in_bytes(MultiBranchData::default_count_offset()));
1148
1149 // The method data pointer needs to be updated.
1150 update_mdp_by_offset(mdp,
1151 in_bytes(MultiBranchData::
1152 default_displacement_offset()));
1153
1154 bind(profile_continue);
1155 }
1156 }
1157
1158 void InterpreterMacroAssembler::profile_switch_case(Register index,
1159 Register mdp,
1160 Register reg2) {
1161 if (ProfileInterpreter) {
1162 Label profile_continue;
1163
1164 // If no method data exists, go to profile_continue.
1165 test_method_data_pointer(mdp, profile_continue);
1166
1167 // Build the base (index * per_case_size_in_bytes()) +
1168 // case_array_offset_in_bytes()
1169 mv(reg2, in_bytes(MultiBranchData::per_case_size()));
1170 mv(t0, in_bytes(MultiBranchData::case_array_offset()));
1171 Assembler::mul(index, index, reg2);
1172 Assembler::add(index, index, t0);
1173
1174 // Update the case count
1175 increment_mdp_data_at(mdp,
1176 index,
1177 in_bytes(MultiBranchData::relative_count_offset()));
1178
// The method data pointer needs to be updated.
1180 update_mdp_by_offset(mdp,
1181 index,
1182 in_bytes(MultiBranchData::
1183 relative_displacement_offset()));
1184
1185 bind(profile_continue);
1186 }
1187 }
1188
1189 void InterpreterMacroAssembler::notify_method_entry() {
// Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
// are sent to track stack depth. If it is possible to enter interp_only_mode we
// add the code to check if the event should be sent.
1193 if (JvmtiExport::can_post_interpreter_events()) {
1194 Label L;
1195 lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
1196 beqz(x13, L);
1197 call_VM(noreg, CAST_FROM_FN_PTR(address,
1198 InterpreterRuntime::post_method_entry));
1199 bind(L);
1200 }
1201
1202 if (DTraceMethodProbes) {
1203 get_method(c_rarg1);
1204 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1205 xthread, c_rarg1);
1206 }
1207
1208 // RedefineClasses() tracing support for obsolete method entry
1209 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1210 get_method(c_rarg1);
1211 call_VM_leaf(
1212 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1213 xthread, c_rarg1);
1214 }
1215 }
1216
1217
1218 void InterpreterMacroAssembler::notify_method_exit(
1219 TosState state, NotifyMethodExitMode mode) {
// Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
// are sent to track stack depth. If it is possible to enter interp_only_mode we
// add the code to check if the event should be sent.
1223 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1224 Label L;
1225 // Note: frame::interpreter_frame_result has a dependency on how the
1226 // method result is saved across the call to post_method_exit. If this
1227 // is changed then the interpreter_frame_result implementation will
1228 // need to be updated too.
1229
// The template interpreter will leave the result on the top of the stack.
1231 push(state);
1232 lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
1233 beqz(x13, L);
1234 call_VM(noreg,
1235 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1236 bind(L);
1237 pop(state);
1238 }
1239
1240 if (DTraceMethodProbes) {
1241 push(state);
1242 get_method(c_rarg1);
1243 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1244 xthread, c_rarg1);
1245 pop(state);
1246 }
1247 }
1248
1249
1250 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
1251 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
1252 int increment, Address mask,
1253 Register tmp1, Register tmp2,
1254 bool preloaded, Label* where) {
1255 Label done;
1256 if (!preloaded) {
1257 lwu(tmp1, counter_addr);
1258 }
1259 add(tmp1, tmp1, increment);
1260 sw(tmp1, counter_addr);
1261 lwu(tmp2, mask);
1262 andr(tmp1, tmp1, tmp2);
1263 bnez(tmp1, done);
1264 j(*where); // offset is too large so we have to use j instead of beqz here
1265 bind(done);
1266 }
1267
1268 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
1269 int number_of_arguments) {
1270 // interpreter specific
1271 //
// Note: No need to save/restore xbcp & xlocals pointer since these
// are callee-saved registers and no blocking/GC can happen
// in leaf calls.
1275 #ifdef ASSERT
1276 {
1277 Label L;
1278 ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1279 beqz(t0, L);
1280 stop("InterpreterMacroAssembler::call_VM_leaf_base:"
1281 " last_sp isn't null");
1282 bind(L);
1283 }
1284 #endif /* ASSERT */
1285 // super call
1286 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1287 }
1288
1289 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
1290 Register java_thread,
1291 Register last_java_sp,
1292 Label* return_pc,
1293 address entry_point,
1294 int number_of_arguments,
1295 bool check_exceptions) {
1296 // interpreter specific
1297 //
1298 // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
1299 // really make a difference for these runtime calls, since they are
1300 // slow anyway. Btw., bcp must be saved/restored since it may change
1301 // due to GC.
1302 save_bcp();
1303 #ifdef ASSERT
1304 {
1305 Label L;
1306 ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1307 beqz(t0, L);
1308 stop("InterpreterMacroAssembler::call_VM_base:"
1309 " last_sp isn't null");
1310 bind(L);
1311 }
1312 #endif /* ASSERT */
1313 // super call
1314 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
1315 return_pc, entry_point,
1316 number_of_arguments, check_exceptions);
1317 // interpreter specific
1318 restore_bcp();
1319 restore_locals();
1320 }
1321
1322 void InterpreterMacroAssembler::call_VM_preemptable_helper(Register oop_result,
1323 address entry_point,
1324 int number_of_arguments,
1325 bool check_exceptions) {
1326 assert(InterpreterRuntime::is_preemptable_call(entry_point),
1327 "VM call not preemptable, should use call_VM()");
1328 Label resume_pc, not_preempted;
1329
1330 #ifdef ASSERT
1331 {
1332 Label L1, L2;
1333 ld(t0, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1334 beqz(t0, L1);
1335 stop("call_VM_preemptable_helper: Should not have alternate return address set");
1336 bind(L1);
1337 // We check this counter in patch_return_pc_with_preempt_stub() during freeze.
1338 incrementw(Address(xthread, JavaThread::interp_at_preemptable_vmcall_cnt_offset()));
1339 lw(t0, Address(xthread, JavaThread::interp_at_preemptable_vmcall_cnt_offset()));
1340 bgtz(t0, L2);
1341 stop("call_VM_preemptable_helper: should be > 0");
1342 bind(L2);
1343 }
1344 #endif /* ASSERT */
1345
1346 // Force freeze slow path.
1347 push_cont_fastpath();
1348
1349 // Make VM call. In case of preemption set last_pc to the one we want to resume to.
1350 // Note: call_VM_base will use resume_pc label to set last_Java_pc.
1351 call_VM_base(noreg, noreg, noreg, &resume_pc, entry_point, number_of_arguments, false /*check_exceptions*/);
1352
1353 pop_cont_fastpath();
1354
1355 #ifdef ASSERT
1356 {
1357 Label L;
1358 decrementw(Address(xthread, JavaThread::interp_at_preemptable_vmcall_cnt_offset()));
1359 lw(t0, Address(xthread, JavaThread::interp_at_preemptable_vmcall_cnt_offset()));
1360 bgez(t0, L);
1361 stop("call_VM_preemptable_helper: should be >= 0");
1362 bind(L);
1363 }
1364 #endif /* ASSERT */
1365
1366 // Check if preempted.
1367 ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1368 beqz(t1, not_preempted);
1369 sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1370 jr(t1);
1371
1372 // In case of preemption, this is where we will resume once we finally acquire the monitor.
1373 bind(resume_pc);
1374 restore_after_resume(false /* is_native */);
1375
1376 bind(not_preempted);
1377 if (check_exceptions) {
1378 // check for pending exceptions
1379 ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1380 Label ok;
1381 beqz(t0, ok);
1382 la(t1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1383 jr(t1);
1384 bind(ok);
1385 }
1386
1387 // get oop result if there is one and reset the value in the thread
1388 if (oop_result->is_valid()) {
1389 get_vm_result_oop(oop_result, xthread);
1390 }
1391 }
1392
1393 static void pass_arg1(MacroAssembler* masm, Register arg) {
1394 if (c_rarg1 != arg) {
1395 masm->mv(c_rarg1, arg);
1396 }
1397 }
1398
1399 static void pass_arg2(MacroAssembler* masm, Register arg) {
1400 if (c_rarg2 != arg) {
1401 masm->mv(c_rarg2, arg);
1402 }
1403 }
1404
1405 void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
1406 address entry_point,
1407 Register arg_1,
1408 bool check_exceptions) {
1409 pass_arg1(this, arg_1);
1410 call_VM_preemptable_helper(oop_result, entry_point, 1, check_exceptions);
1411 }
1412
1413 void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
1414 address entry_point,
1415 Register arg_1,
1416 Register arg_2,
1417 bool check_exceptions) {
1418 LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1419 pass_arg2(this, arg_2);
1420 pass_arg1(this, arg_1);
1421 call_VM_preemptable_helper(oop_result, entry_point, 2, check_exceptions);
1422 }
1423
1424 void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
1425 la(t1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
1426 jalr(t1);
1427 if (is_native) {
1428 // On resume we need to set up stack as expected
1429 push(dtos);
1430 push(ltos);
1431 }
1432 }
1433
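// Record the type of obj in the profiling cell at mdo_addr: a null obj only sets
// the null_seen bit; the first klass seen is stored; a different klass seen later
// degrades the cell to type_unknown.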
1434 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
1435 assert_different_registers(obj, tmp, t0, mdo_addr.base());
1436 Label update, next, none;
1437
1438 verify_oop(obj);
1439
1440 bnez(obj, update);
1441 orptr(mdo_addr, TypeEntries::null_seen, t0, tmp);
1442 j(next);
1443
1444 bind(update);
1445 load_klass(obj, obj);
1446
1447 ld(tmp, mdo_addr);
1448 xorr(obj, obj, tmp);
1449 andi(t0, obj, TypeEntries::type_klass_mask);
1450 beqz(t0, next); // klass seen before, nothing to
1451 // do. The unknown bit may have been
1452 // set already but no need to check.
1453
1454 test_bit(t0, obj, exact_log2(TypeEntries::type_unknown));
1455 bnez(t0, next);
1456 // already unknown. Nothing to do anymore.
1457
1458 beqz(tmp, none);
1459 mv(t0, (u1)TypeEntries::null_seen);
1460 beq(tmp, t0, none);
1461 // There is a chance that the checks above
1462 // fail if another thread has just set the
1463 // profiling to this obj's klass
1464 xorr(obj, obj, tmp); // get back original value before XOR
1465 ld(tmp, mdo_addr);
1466 xorr(obj, obj, tmp);
1467 andi(t0, obj, TypeEntries::type_klass_mask);
1468 beqz(t0, next);
1469
1470 // different than before. Cannot keep accurate profile.
1471 orptr(mdo_addr, TypeEntries::type_unknown, t0, tmp);
1472 j(next);
1473
1474 bind(none);
1475 // first time here. Set profile type.
1476 sd(obj, mdo_addr);
1477 #ifdef ASSERT
1478 andi(obj, obj, TypeEntries::type_mask);
1479 verify_klass_ptr(obj);
1480 #endif
1481
1482 bind(next);
1483 }
1484
1485 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
1486 if (!ProfileInterpreter) {
1487 return;
1488 }
1489
1490 if (MethodData::profile_arguments() || MethodData::profile_return()) {
1491 Label profile_continue;
1492
1493 test_method_data_pointer(mdp, profile_continue);
1494
1495 int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1496
1497 lbu(t0, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
1498 if (is_virtual) {
1499 mv(tmp, (u1)DataLayout::virtual_call_type_data_tag);
1500 bne(t0, tmp, profile_continue);
1501 } else {
1502 mv(tmp, (u1)DataLayout::call_type_data_tag);
1503 bne(t0, tmp, profile_continue);
1504 }
1505
1506 // calculate slot step
1507 static int stack_slot_offset0 = in_bytes(TypeEntriesAtCall::stack_slot_offset(0));
1508 static int slot_step = in_bytes(TypeEntriesAtCall::stack_slot_offset(1)) - stack_slot_offset0;
1509
1510 // calculate type step
1511 static int argument_type_offset0 = in_bytes(TypeEntriesAtCall::argument_type_offset(0));
1512 static int type_step = in_bytes(TypeEntriesAtCall::argument_type_offset(1)) - argument_type_offset0;
1513
1514 if (MethodData::profile_arguments()) {
1515 Label done, loop, loopEnd, profileArgument, profileReturnType;
1516 RegSet pushed_registers;
1517 pushed_registers += x15;
1518 pushed_registers += x16;
1519 pushed_registers += x17;
1520 Register mdo_addr = x15;
1521 Register index = x16;
1522 Register off_to_args = x17;
1523 push_reg(pushed_registers, sp);
1524
1525 mv(off_to_args, in_bytes(TypeEntriesAtCall::args_data_offset()));
1526 mv(t0, TypeProfileArgsLimit);
1527 beqz(t0, loopEnd);
1528
1529 mv(index, zr); // index < TypeProfileArgsLimit
1530 bind(loop);
1531 bgtz(index, profileReturnType);
1532 mv(t0, (int)MethodData::profile_return());
1533 beqz(t0, profileArgument); // (index > 0 || MethodData::profile_return()) == false
1534 bind(profileReturnType);
1535 // If return value type is profiled we may have no argument to profile
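// tmp <- cells left = cell_count - index * per_arg_count; if fewer than one
// argument entry remains, we are done.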
1536 ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1537 mv(t1, - TypeStackSlotEntries::per_arg_count());
1538 mul(t1, index, t1);
1539 add(tmp, tmp, t1);
1540 mv(t1, TypeStackSlotEntries::per_arg_count());
1541 add(t0, mdp, off_to_args);
1542 blt(tmp, t1, done);
1543
1544 bind(profileArgument);
1545
1546 ld(tmp, Address(callee, Method::const_offset()));
1547 load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
1548 // stack offset o (zero based) from the start of the argument
1549 // list, for n arguments translates into offset n - o - 1 from
1550 // the end of the argument list
1551 mv(t0, stack_slot_offset0);
1552 mv(t1, slot_step);
1553 mul(t1, index, t1);
1554 add(t0, t0, t1);
1555 add(t0, mdp, t0);
1556 ld(t0, Address(t0));
1557 sub(tmp, tmp, t0);
1558 subi(tmp, tmp, 1);
1559 Address arg_addr = argument_address(tmp);
1560 ld(tmp, arg_addr);
1561
1562 mv(t0, argument_type_offset0);
1563 mv(t1, type_step);
1564 mul(t1, index, t1);
1565 add(t0, t0, t1);
1566 add(mdo_addr, mdp, t0);
1567 Address mdo_arg_addr(mdo_addr, 0);
1568 profile_obj_type(tmp, mdo_arg_addr, t1);
1569
1570 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1571 addi(off_to_args, off_to_args, to_add);
1572
1573 // increment index by 1
1574 addi(index, index, 1);
1575 mv(t1, TypeProfileArgsLimit);
1576 blt(index, t1, loop);
1577 bind(loopEnd);
1578
1579 if (MethodData::profile_return()) {
1580 ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1581 sub(tmp, tmp, TypeProfileArgsLimit * TypeStackSlotEntries::per_arg_count());
1582 }
1583
1584 add(t0, mdp, off_to_args);
1585 bind(done);
1586 mv(mdp, t0);
1587
1588 // unspill the clobbered registers
1589 pop_reg(pushed_registers, sp);
1590
1591 if (MethodData::profile_return()) {
1592 // We're right after the type profile for the last
1593 // argument. tmp is the number of cells left in the
1594 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1595 // if there's a return to profile.
1596 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1597 shadd(mdp, tmp, mdp, tmp, exact_log2(DataLayout::cell_size));
1598 }
1599 sd(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
1600 } else {
1601 assert(MethodData::profile_return(), "either profile call args or call ret");
1602 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1603 }
1604
1605 // mdp points right after the end of the
1606 // CallTypeData/VirtualCallTypeData, right after the cells for the
1607 // return value type if there's one
1608
1609 bind(profile_continue);
1610 }
1611 }
1612
1613 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1614 assert_different_registers(mdp, ret, tmp, xbcp, t0, t1);
1615 if (ProfileInterpreter && MethodData::profile_return()) {
1616 Label profile_continue, done;
1617
1618 test_method_data_pointer(mdp, profile_continue);
1619
1620 if (MethodData::profile_return_jsr292_only()) {
1621 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
1622
// If we don't profile all invoke bytecodes we must make sure
// it's a bytecode we indeed profile. We can't go back to the
// beginning of the ProfileData we intend to update to check its
// type because we're right after it and we don't know its
// length.
1628 Label do_profile;
1629 lbu(t0, Address(xbcp, 0));
1630 mv(tmp, (u1)Bytecodes::_invokedynamic);
1631 beq(t0, tmp, do_profile);
1632 mv(tmp, (u1)Bytecodes::_invokehandle);
1633 beq(t0, tmp, do_profile);
1634 get_method(tmp);
1635 lhu(t0, Address(tmp, Method::intrinsic_id_offset()));
1636 mv(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1637 bne(t0, t1, profile_continue);
1638 bind(do_profile);
1639 }
1640
1641 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1642 mv(tmp, ret);
1643 profile_obj_type(tmp, mdo_ret_addr, t1);
1644
1645 bind(profile_continue);
1646 }
1647 }
1648
1649 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3) {
1650 assert_different_registers(t0, t1, mdp, tmp1, tmp2, tmp3);
1651 if (ProfileInterpreter && MethodData::profile_parameters()) {
1652 Label profile_continue, done;
1653
1654 test_method_data_pointer(mdp, profile_continue);
1655
1656 // Load the offset of the area within the MDO used for
1657 // parameters. If it's negative we're not profiling any parameters
1658 lwu(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1659 srli(tmp2, tmp1, 31);
1660 bnez(tmp2, profile_continue); // i.e. sign bit set
1661
1662 // Compute a pointer to the area for parameters from the offset
1663 // and move the pointer to the slot for the last
1664 // parameters. Collect profiling from last parameter down.
1665 // mdo start + parameters offset + array length - 1
1666 add(mdp, mdp, tmp1);
1667 ld(tmp1, Address(mdp, ArrayData::array_len_offset()));
1668 subi(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
1669
1670 Label loop;
1671 bind(loop);
1672
1673 int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
1674 int type_base = in_bytes(ParametersTypeData::type_offset(0));
1675 int per_arg_scale = exact_log2(DataLayout::cell_size);
1676 add(t0, mdp, off_base);
1677 add(t1, mdp, type_base);
1678
1679 shadd(tmp2, tmp1, t0, tmp2, per_arg_scale);
1680 // load offset on the stack from the slot for this parameter
1681 ld(tmp2, Address(tmp2, 0));
1682 neg(tmp2, tmp2);
1683
1684 // read the parameter from the local area
1685 shadd(tmp2, tmp2, xlocals, tmp2, Interpreter::logStackElementSize);
1686 ld(tmp2, Address(tmp2, 0));
1687
1688 // profile the parameter
1689 shadd(t1, tmp1, t1, t0, per_arg_scale);
1690 Address arg_type(t1, 0);
1691 profile_obj_type(tmp2, arg_type, tmp3);
1692
1693 // go to next parameter
1694 subi(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
1695 bgez(tmp1, loop);
1696
1697 bind(profile_continue);
1698 }
1699 }
1700
1701 void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
// Get index out of bytecode pointer (see get_cache_entry_pointer_at_bcp).
// Register "cache" is trashed by the next ld, so let's use it as a temporary register.
1704 get_cache_index_at_bcp(index, cache, 1, sizeof(u4));
1705 // Get address of invokedynamic array
1706 ld(cache, Address(xcpool, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
1707 // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
1708 slli(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
1709 addi(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
1710 add(cache, cache, index);
1711 }
1712
1713 void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
1714 // Get index out of bytecode pointer
1715 get_cache_index_at_bcp(index, cache, bcp_offset, sizeof(u2));
1716 // Take shortcut if the size is a power of 2
1717 if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
1718 slli(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2
1719 } else {
1720 mv(cache, sizeof(ResolvedFieldEntry));
mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
1722 }
1723 // Get address of field entries array
1724 ld(cache, Address(xcpool, ConstantPoolCache::field_entries_offset()));
addi(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
1726 add(cache, cache, index);
1727 // Prevents stale data from being read after the bytecode is patched to the fast bytecode
1728 membar(MacroAssembler::LoadLoad);
1729 }
1730
1731 void InterpreterMacroAssembler::get_method_counters(Register method,
1732 Register mcs, Label& skip) {
1733 Label has_counters;
1734 ld(mcs, Address(method, Method::method_counters_offset()));
1735 bnez(mcs, has_counters);
1736 call_VM(noreg, CAST_FROM_FN_PTR(address,
1737 InterpreterRuntime::build_method_counters), method);
1738 ld(mcs, Address(method, Method::method_counters_offset()));
1739 beqz(mcs, skip); // No MethodCounters allocated, OutOfMemory
1740 bind(has_counters);
1741 }
1742
1743 void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
1744 // Get index out of bytecode pointer
1745 get_cache_index_at_bcp(index, cache, bcp_offset, sizeof(u2));
1746 mv(cache, sizeof(ResolvedMethodEntry));
1747 mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
1748
// Get address of method entries array
1750 ld(cache, Address(xcpool, ConstantPoolCache::method_entries_offset()));
1751 addi(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
1752 add(cache, cache, index);
1753 }
1754
1755 #ifdef ASSERT
1756 void InterpreterMacroAssembler::verify_field_offset(Register reg) {
// Verify the field offset is not in the header; this implicitly checks for 0
1758 Label L;
1759 mv(t0, oopDesc::base_offset_in_bytes());
1760 bge(reg, t0, L);
1761 stop("bad field offset");
1762 bind(L);
1763 }
1764
1765 void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag,
1766 const char* msg, bool stop_by_hit) {
1767 Label L;
1768 test_bit(t0, access_flags, exact_log2(flag));
1769 if (stop_by_hit) {
1770 beqz(t0, L);
1771 } else {
1772 bnez(t0, L);
1773 }
1774 stop(msg);
1775 bind(L);
1776 }
1777
1778 void InterpreterMacroAssembler::verify_frame_setup() {
1779 Label L;
1780 const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1781 ld(t0, monitor_block_top);
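// The stored top is a frame-relative offset in words; derelativize it before comparing with esp.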
1782 shadd(t0, t0, fp, t0, LogBytesPerWord);
1783 beq(esp, t0, L);
1784 stop("broken stack frame setup in interpreter");
1785 bind(L);
1786 }
1787 #endif