/*
 * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

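// The '__' shorthand routes each emitted instruction through the
// disassembler hook, so that interpreter codelets can be annotated with the
// generating file and line (e.g. in -XX:+PrintInterpreter output).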
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Address computation: local variables

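// The interpreter's local variable array grows towards lower addresses:
// slot n lives below slot n - 1 relative to rlocals. This is why
// laddress(n) is iaddress(n + 1) (a two-slot long/double is addressed
// through its higher-numbered, lower-addressed slot) and why locals_index()
// negates the index it fetches before it is applied with a positive scale.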
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

// The top of the Java expression stack, which may differ from esp().
// It does not differ for category 1 values.
static inline Address at_tos() {
  return Address(esp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp, Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp, Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp, Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion: return the AArch64 condition that is the logical
// negation of cc, since callers branch to the not-taken label.
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}


// Miscellaneous helper routines
// Store an oop (or null) at the Address dst.
// If val == noreg this means store a null
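// The extra registers passed to store_heap_oop below are temporaries for
// the GC barrier; callers must assume r10, r11 and r3 are clobbered.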
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  assert_different_registers(bc_reg, temp_reg);
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_vputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldarb(temp_reg, temp_reg);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif
  // Patch the bytecode with a release store to coordinate with the
  // ResolvedFieldEntry loads in the fast bytecode codelets. load_field_entry
  // has a memory barrier that, together with the control dependency on
  // entering the fast codelet itself, provides the needed ordering.
  __ lea(temp_reg, at_bcp(0));
  __ stlrb(bc_reg, temp_reg);
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
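  // The 16-bit operand is stored big-endian in the bytecode stream:
  // byte-reverse the loaded word, which leaves the operand in the upper
  // half, then arithmetic-shift right by 16 to sign extend it into an int.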
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}

void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_metadata(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCache::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask, or wings it with a shift plus an assert that
  // the mask is not needed; aarch64 just uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
           ConstantPoolCache::tos_state_bits);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpw(flags, itos);
      __ br(Assembler::NE, notInt);
      // itos
      __ ldrw(r0, field);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmpw(flags, ftos);
      __ br(Assembler::NE, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmpw(flags, stos);
      __ br(Assembler::NE, notShort);
      // stos
      __ load_signed_short(r0, field);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmpw(flags, btos);
      __ br(Assembler::NE, notByte);
      // btos
      __ load_signed_byte(r0, field);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmpw(flags, ctos);
      __ br(Assembler::NE, notChar);
      // ctos
      __ load_unsigned_short(r0, field);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmpw(flags, ztos);
      __ br(Assembler::NE, notBool);
      // ztos
      __ load_signed_byte(r0, field);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpw(flags, ltos);
      __ br(Assembler::NE, notLong);
      // ltos
      __ ldr(r0, field);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmpw(flags, dtos);
      __ br(Assembler::NE, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

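// Fetch a one-byte local variable index and negate it: locals grow towards
// lower addresses, so the register forms of iaddress() and friends apply
// the negated index to rlocals with a positive scale.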
void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // If the next bytecode is _iload, don't rewrite yet: we only want to
    // rewrite the last two iloads of a run. Below, a next bytecode of
    // _fast_iload turns this pair into _fast_iload2, a next bytecode of
    // _caload turns it into _fast_icaload, and anything else means this
    // iload stands alone and is rewritten to _fast_iload.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);
  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
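  // Subtract the unscaled index from rlocals directly (instead of negating
  // it first as locals_index() does), then read the 64-bit value at
  // local_offset_in_bytes(1) within the two-slot pair, matching laddress().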
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
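  // The unsigned lower-than check below also rejects negative indices,
  // which appear as very large unsigned values.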
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ profile_array_type<ArrayLoadData>(r2, r0, r4);
  if (UseArrayFlattening) {
    Label is_flat_array, done;

    __ test_flat_array_oop(r0, rscratch1 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), r0, r1);
    // Ensure the stores to copy the inline field contents are visible
    // before any subsequent store that publishes this reference.
    __ membar(Assembler::StoreStore);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
  __ profile_element_type(r2, r0, r4);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str. We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, is_flat_array, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  index_check(r3, r2);     // kills r1

  __ profile_array_type<ArrayStoreData>(r4, r3, r5);
  __ profile_multiple_element_types(r4, r0, r5, r6);

  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
  // Be careful not to clobber r4 below

  // do array store check - check for null value first
  __ cbz(r0, is_null);

  // Move array class to r5
  __ load_klass(r5, r3);

  if (UseArrayFlattening) {
    __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
    __ test_flat_array_layout(r6, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);

  // Move array element superklass into r0
  __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.

  // is "r1 <: r0" ? (value subclass <: array element superclass)
  __ gen_subtype_check(r1, ok_is_subtype, false);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  // Clobbers: r10, r11, r3
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a null in r0, r3=array, r2=index.  Store null at ary[idx]
  __ bind(is_null);
  if (Arguments::is_valhalla_enabled()) {
    Label is_null_into_value_array_npe, store_null;

    if (UseArrayFlattening) {
      __ test_flat_array_oop(r3, rscratch1, is_flat_array);
    }

    // No way to store null in a null-free array
    __ test_null_free_array_oop(r3, rscratch1, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a null
  // Clobbers: r10, r11, r3
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (UseArrayFlattening) {
    __ bind(is_flat_array); // Store non-null value to flat

    __ ldr(r0, at_tos());    // value
    __ ldr(r3, at_tos_p1()); // index
    __ ldr(r2, at_tos_p2()); // array
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), r0, r2, r3);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());    // load b
  __ ldr(r2, at_tos_p1()); // load a
  __ str(r0, at_tos_p1()); // store b
  __ str(r2, at_tos());    // store a
  __ push(r0);             // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());    // load c
  __ ldr(r2, at_tos_p2()); // load a
  __ str(r0, at_tos_p2()); // store c in a
  __ push(r0);             // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2()); // load b
  __ str(r2, at_tos_p2()); // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1()); // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1()); // load a
  __ push(r0);             // push a
  __ ldr(r0, at_tos_p1()); // load b
  __ push(r0);             // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());    // load c
  __ ldr(r0, at_tos_p1()); // load b
  __ push(r0);             // push b
  __ push(r2);             // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3()); // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4()); // load a
  __ str(r2, at_tos_p2()); // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4()); // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());    // load d
  __ ldr(r0, at_tos_p1()); // load c
  __ push(r0);             // push c
  __ push(r2);             // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4()); // load b
  __ str(r0, at_tos_p2()); // store b in d
  __ str(r2, at_tos_p4()); // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5()); // load a
  __ ldr(r0, at_tos_p3()); // load c
  __ str(r2, at_tos_p3()); // store a in c
  __ str(r0, at_tos_p5()); // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1()); // load a
  __ ldr(r0, at_tos());    // load b
  __ str(r2, at_tos());    // store a in b
  __ str(r0, at_tos_p1()); // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0);  break;
  case sub  : __ subw(r0, r1, r0);  break;
  case mul  : __ mulw(r0, r1, r0);  break;
  case _and : __ andw(r0, r1, r0);  break;
  case _or  : __ orrw(r0, r1, r0);  break;
  case _xor : __ eorw(r0, r1, r0);  break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0);  break;
  case sub  : __ sub(r0, r1, r0);  break;
  case mul  : __ mul(r0, r1, r0);  break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0);  break;
  case _xor : __ eor(r0, r1, r0);  break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
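  // corrected_idivl implements the Java division semantics, including the
  // min_jint / -1 case, which must yield min_jint per the JLS.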
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
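  // The four operand bytes at bcp + 2 are a big-endian local index followed
  // by a big-endian increment. After the little-endian 32-bit load, rev16
  // swaps the bytes within each halfword, leaving the index in bits 0..15
  // and the increment in bits 16..31; the index is negated for the
  // downward-growing locals array, the increment sign-extended.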
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
    {
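      // If the conversion raised any IEEE exception flag in FPSR (invalid
      // operation for NaN or out-of-range input, in particular), take the
      // slow path and let the shared runtime stub produce the Java-mandated
      // result.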
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzsw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_f2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzs(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzdw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_d2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzd(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // csinc yields 0 when the comparison was equal and 1 otherwise, one
  // instruction shorter than the obvious mov/csel sequence.
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
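  // (branch offsets are stored big-endian in the bytecode stream, hence the
  // byte reversal before sign extension below)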
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.

  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the displacement in r2 (16-bit for jsr, 32-bit for jsr_w)
1851 __ add(rbcp, rbcp, r2);
1852 __ dispatch_only(vtos, /*generate_poll*/true);
1853 return;
1854 }
1855
1856 // Normal (non-jsr) branch handling
1857
1858 // Adjust the bcp by the displacement in r2
1859 __ add(rbcp, rbcp, r2);
1860
1861 assert(UseLoopCounter || !UseOnStackReplacement,
1862 "on-stack-replacement requires loop counters");
1863 Label backedge_counter_overflow;
1864 Label dispatch;
1865 if (UseLoopCounter) {
1866 // increment backedge counter for backward branches
1867 // r0: MDO
1868 // r2: target offset
1869 __ cmp(r2, zr);
1870 __ br(Assembler::GT, dispatch); // count only if backward branch
1871
1872 // ECN: FIXME: This code smells
1873 // check if MethodCounters exists
1874 Label has_counters;
1875 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1876 __ cbnz(rscratch1, has_counters);
1877 __ push(r0);
1878 __ push(r2);
1879 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1880 InterpreterRuntime::build_method_counters), rmethod);
1881 __ pop(r2);
1882 __ pop(r0);
1883 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1884 __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1885 __ bind(has_counters);
1886
1887 Label no_mdo;
1888 int increment = InvocationCounter::count_increment;
1889 if (ProfileInterpreter) {
1890 // Are we profiling?
1891 __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1892 __ cbz(r1, no_mdo);
1893 // Increment the MDO backedge counter
1894 const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1895 in_bytes(InvocationCounter::counter_offset()));
1896 const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1897 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1898 r0, rscratch1, false, Assembler::EQ,
1899 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1900 __ b(dispatch);
1901 }
1902 __ bind(no_mdo);
1903 // Increment backedge counter in MethodCounters*
1904 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1905 const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1906 __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1907 r0, rscratch2, false, Assembler::EQ,
1908 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1909 __ bind(dispatch);
1910 }
1911
1912 // Pre-load the next target bytecode into rscratch1
1913 __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1914
1915 // continue with the bytecode @ target
1916 // rscratch1: target bytecode
1917 // rbcp: target bcp
1918 __ dispatch_only(vtos, /*generate_poll*/true);
1919
1920 if (UseLoopCounter && UseOnStackReplacement) {
1921 // invocation counter overflow
    __ bind(backedge_counter_overflow);
    __ neg(r2, r2);
    __ add(r2, r2, rbcp);     // branch bcp
    // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::frequency_counter_overflow),
               r2);
    __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode

    // r0: osr nmethod (osr ok) or null (osr not possible)
    // w1: target bytecode
    // r2: scratch
    __ cbz(r0, dispatch);     // test result -- no osr if null
    // nmethod may have been invalidated (VM may block upon call_VM return)
    __ ldrb(r2, Address(r0, nmethod::state_offset()));
    if (nmethod::in_use != 0)
      __ sub(r2, r2, nmethod::in_use);
    __ cbnz(r2, dispatch);

    // We have the address of an on stack replacement routine in r0
    // We need to prepare to execute the OSR method. First we must
    // migrate the locals and monitors off of the stack.

    __ mov(r19, r0);          // save the nmethod

    JFR_ONLY(__ enter_jfr_critical_section();)

    call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

    // r0 is OSR buffer, move it to expected parameter location
    __ mov(j_rarg0, r0);

    // remove activation
    // get sender esp
    __ ldr(esp,
           Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
    // remove frame anchor
    __ leave();

    JFR_ONLY(__ leave_jfr_critical_section();)

    // Ensure compiled code always sees stack at proper alignment
    __ andr(sp, esp, -16);

    // and begin the OSR nmethod
    __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
    __ br(rscratch1);
  }
}


void TemplateTable::if_0cmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnzw(r0, not_taken);
  else if (cc == not_equal)
    __ cbzw(r0, not_taken);
  else {
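    // andsw with the zero register as destination is a "tst r0, r0":
    // it only sets the flags for the signed compare-with-zero below.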
    __ andsw(zr, r0, r0);
    __ br(j_not(cc), not_taken);
  }

  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_icmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(r1);
  __ cmpw(r1, r0, Assembler::LSL);
  __ br(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label taken, not_taken;
  __ pop_ptr(r1);

  __ profile_acmp(r2, r1, r0, r4);

  Register is_inline_type_mask = rscratch1;
  __ mov(is_inline_type_mask, markWord::inline_type_pattern);
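  // Inline-type instances carry markWord::inline_type_pattern in their
  // mark word; the checks below require the pattern in *both* marks
  // before attempting the substitutability test.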

  if (Arguments::is_valhalla_enabled()) {
    // The substitutability test is only necessary if r1 and r0 are not the same...
    __ cmp(r1, r0);
    __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);

    // ... neither are null...
    __ cbz(r1, (cc == equal) ? not_taken : taken);
    __ cbz(r0, (cc == equal) ? not_taken : taken);

    // ...and both are values...
    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
    __ andr(r2, r2, is_inline_type_mask);
    __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ andr(r4, r4, is_inline_type_mask);
    __ andr(r2, r2, r4);
    __ cmp(r2, is_inline_type_mask);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // ...with the same value klass
    __ load_metadata(r2, r1);
    __ load_metadata(r4, r0);
    __ cmp(r2, r4);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // Know both are the same type, let's test for substitutability...
    if (cc == equal) {
      invoke_is_substitutable(r0, r1, taken, not_taken);
    } else {
      invoke_is_substitutable(r0, r1, not_taken, taken);
    }
    __ stop("Not reachable");
  }

  __ cmpoop(r1, r0);
  __ br(j_not(cc), not_taken);
  __ bind(taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0, true);
}

void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
                                            Label& is_subst, Label& not_subst) {

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
  // r0 holds the answer; branch to the requested outcome
  __ cbz(r0, not_subst);
  __ b(is_subst);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}


void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
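  // Layout of the tableswitch operands in the bytecode stream, after
  // padding to a 4-byte boundary (all entries big-endian, hence the
  // rev32s below):
  //   default offset : 4 bytes
  //   low            : 4 bytes
  //   high           : 4 bytes
  //   jump offsets   : (high - low + 1) entries of 4 bytes each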
  // align rbcp
  __ lea(r1, at_bcp(BytesPerInt));
  __ andr(r1, r1, -BytesPerInt);
  // load lo & hi
  __ ldrw(r2, Address(r1, BytesPerInt));
  __ ldrw(r3, Address(r1, 2 * BytesPerInt));
  __ rev32(r2, r2);
  __ rev32(r3, r3);
  // check against lo & hi
  __ cmpw(r0, r2);
  __ br(Assembler::LT, default_case);
  __ cmpw(r0, r3);
  __ br(Assembler::GT, default_case);
  // lookup dispatch offset
  __ subw(r0, r0, r2);
  __ lea(r3, Address(r1, r0, Address::uxtw(2)));
  __ ldrw(r3, Address(r3, 3 * BytesPerInt));
  __ profile_switch_case(r0, r1, r2);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ dispatch_only(vtos, /*generate_poll*/true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r1, 0));
  __ b(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
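  // Layout of the lookupswitch operands (here rewritten to
  // fast_linearswitch), after padding to a 4-byte boundary, all
  // entries big-endian:
  //   default offset    : 4 bytes
  //   npairs            : 4 bytes
  //   match/offset pairs: npairs entries of 8 bytes each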
  // bswap r0 so we can avoid bswapping the table entries
  __ rev32(r0, r0);
  // align rbcp
  __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andr(r19, r19, -BytesPerInt);
  // set counter
  __ ldrw(r1, Address(r19, BytesPerInt));
  __ rev32(r1, r1);
  __ b(loop_entry);
  // table search
  __ bind(loop);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
  __ cmpw(r0, rscratch1);
  __ br(Assembler::EQ, found);
  __ bind(loop_entry);
  __ subs(r1, r1, 1);
  __ br(Assembler::PL, loop);
  // default case
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r19, 0));
  __ b(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
  __ profile_switch_case(r1, r0, r19);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ ldrb(rscratch1, Address(rbcp, 0));
  __ dispatch_only(vtos, /*generate_poll*/true);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key   = r0; // already set (tosca)
  const Register array = r1;
  const Register i     = r2;
  const Register j     = r3;
  const Register h     = rscratch1;
  const Register temp  = rscratch2;
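  // Each LookupswitchPair is 8 bytes ({ int match; int offset; }, both
  // big-endian), so array[h].match lives at array + h*8 and the matching
  // offset 4 bytes after it.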

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andr(array, array, -BytesPerInt);

  // Initialize i & j
  __ mov(i, 0);                             // i = 0;
  __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
  // Convert j into native byte ordering
  __ rev32(j, j);

  // And start
  Label entry;
  __ b(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ addw(h, i, j);   // h = i + j;
    __ lsrw(h, h, 1);   // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ ldr(temp, Address(array, h, Address::lsl(3)));
    __ rev32(temp, temp);
    __ cmpw(key, temp);
    // j = h if (key < array[h].fast_match())
    __ csel(j, h, j, Assembler::LT);
    // i = h if (key >= array[h].fast_match())
    __ csel(i, h, i, Assembler::GE);
    // while (i+1 < j)
    __ bind(entry);
    __ addw(h, i, 1);           // i+1
    __ cmpw(h, j);              // i+1 < j
    __ br(Assembler::LT, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ ldr(temp, Address(array, i, Address::lsl(3)));
  __ rev32(temp, temp);
  __ cmpw(key, temp);
  __ br(Assembler::NE, default_case);

  // entry found -> j = offset
  __ add(j, array, i, ext::uxtx, 3);
  __ ldrw(j, Address(j, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ ldrw(j, Address(array, -2 * BytesPerInt));
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);
}


void TemplateTable::_return(TosState state)
{
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");

    __ ldr(c_rarg1, aaddress(0));
    __ load_klass(r3, c_rarg1);
    __ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
    Label skip_register_finalizer;
    __ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  // Issue a StoreStore barrier after all stores but before return
  // from any constructor for any class with a final field. We don't
  // know whether this method is such a constructor, so we always do so.
  if (_desc->bytecode() == Bytecodes::_return
      || _desc->bytecode() == Bytecodes::_return_register_finalizer)
    __ membar(MacroAssembler::StoreStore);

  if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
    __ push(state);
    __ push_cont_fastpath(rthread);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop_cont_fastpath(rthread);
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
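  // (narrow() adjusts r0 to the method's declared return type, e.g.
  //  sign-extending for byte/short, zero-extending for char, and masking
  //  to the low bit for boolean.)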
  if (state == itos) {
    __ narrow(r0);
  }

  __ remove_activation(state);
  __ ret(lr);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
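//
// Concretely, the field-access code below emits:
//   membar(StoreStore|LoadStore)  before a volatile store
//   membar(StoreLoad|StoreStore)  after a volatile store
//   membar(LoadLoad|LoadStore)    after a volatile load
// plus a leading AnyAny barrier before volatile loads whenever compiled
// code (which may use ldar/stlr) can be mixed in; see the 8179954
// comments below.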

void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
                                                       Register Rcache,
                                                       Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);
  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");

  Label L_clinit_barrier_slow, L_done;

  Bytecodes::Code code = bytecode();
  __ load_method_entry(Rcache, index);
  switch(byte_no) {
    case f1_byte:
      __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
      break;
    case f2_byte:
      __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
      break;
  }
  // Load-acquire the bytecode to match store-release in InterpreterRuntime
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?

  // Class initialization barrier for static methods
  if (bytecode() == Bytecodes::_invokestatic) {
    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
    __ br(Assembler::NE, L_clinit_barrier_slow);
    __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
    __ load_method_holder(temp, temp);
    __ clinit_barrier(temp, rscratch1, &L_done, /*L_slow_path*/ nullptr);
    __ bind(L_clinit_barrier_slow);
  } else {
    __ br(Assembler::EQ, L_done);
  }

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM_preemptable(noreg, entry, temp);

  // Update registers with resolved info
  __ load_method_entry(Rcache, index);
  // n.b. unlike x86, Rcache is now rcpool plus the indexed offset,
  // so all clients of this method must be modified accordingly
  __ bind(L_done);
}

void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
                                                      Register Rcache,
                                                      Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);

  Label L_clinit_barrier_slow, L_done;

  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ load_field_entry(Rcache, index);
  if (byte_no == f1_byte) {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
  } else {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
  }
  // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?

  // Class initialization barrier for static fields
  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
    const Register field_holder = temp;

    __ br(Assembler::NE, L_clinit_barrier_slow);
    __ ldr(field_holder, Address(Rcache, in_bytes(ResolvedFieldEntry::field_holder_offset())));
    __ clinit_barrier(field_holder, rscratch1, &L_done, /*L_slow_path*/ nullptr);
    __ bind(L_clinit_barrier_slow);
  } else {
    __ br(Assembler::EQ, L_done);
  }

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM_preemptable(noreg, entry, temp);

  // Update registers with resolved info
  __ load_field_entry(Rcache, index);
  __ bind(L_done);
}

void TemplateTable::load_resolved_field_entry(Register obj,
                                              Register cache,
                                              Register tos_state,
                                              Register offset,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, tos_state, flags, offset);

  // Field offset
  __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);

  // Flags
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));

  // TOS state
  if (tos_state != noreg) {
    __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
  }

  // Klass overwrite register
  if (is_static) {
    __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldr(obj, Address(obj, mirror_offset));
    __ resolve_oop_handle(obj, r5, rscratch2);
  }
}

void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
                                                                 Register method,
                                                                 Register flags) {

  // setup registers
  const Register index = flags;
  assert_different_registers(method, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_handle(Register cache,
                                                      Register method,
                                                      Register ref_index,
                                                      Register flags) {
  // setup registers
  const Register index = ref_index;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // maybe push appendix to arguments (just before return address)
  Label L_no_push;
  __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
  // invokehandle uses an index into the resolved references array
  __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter.
  // This must be done before we get the receiver,
  // since the parameter_size includes it.
  Register appendix = method;
  __ load_resolved_reference_at_index(appendix, ref_index);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_interface(Register cache,
                                                         Register klass,
                                                         Register method_or_table_index,
                                                         Register flags) {
  // setup registers
  const Register index = method_or_table_index;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // Invokeinterface can behave in different ways:
  // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
  // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
  // vtable index is placed in the register.
  // Otherwise, the registers will be populated with the klass and method.

  Label NotVirtual; Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ b(Done);

  __ bind(NotVirtual);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
  __ bind(Done);
}

void TemplateTable::load_resolved_method_entry_virtual(Register cache,
                                                       Register method_or_table_index,
                                                       Register flags) {
  // setup registers
  const Register index = flags;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f2_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // method_or_table_index can either be a vtable index or a Method*, depending on the virtual final flag
  Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ bind(Done);
}

// The rmethod register is input and overwritten to be the adapter method for the
// indy call. The link register (lr) is set to the return address for the adapter
// and an appendix may be pushed to the stack. Registers r0-r3 are clobbered.
void TemplateTable::load_invokedynamic_entry(Register method) {
  // setup registers
  const Register appendix = r0;
  const Register cache = r2;
  const Register index = r3;
  assert_different_registers(method, appendix, cache, index, rcpool);

  __ save_bcp();

  Label resolved;

  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

  // Compare the method to zero
  __ cbnz(method, resolved);

  Bytecodes::Code code = bytecode();

  // Call to the interpreter runtime to resolve invokedynamic
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(method, code);  // this is essentially Bytecodes::_invokedynamic
  __ call_VM(noreg, entry, method);
  // Update registers with resolved info
  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

#ifdef ASSERT
  __ cbnz(method, resolved);
  __ stop("Should be resolved by now");
#endif // ASSERT
  __ bind(resolved);

  Label L_no_push;
  // Check if there is an appendix
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
  __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);

  // Get appendix
  __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter
  // since the parameter_size includes it.
  __ push(method);
  __ mov(method, index);
  __ load_resolved_reference_at_index(appendix, method);
  __ verify_oop(appendix);
  __ pop(method);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  // compute return type
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
  // load return address
  // The return address is loaded into the link register (lr) rather than
  // pushed to the stack as on x86.
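  // The table has one entry per result TosState; each entry is an 8-byte
  // code pointer, hence the lsl(3) scaling below.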
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
  }
}

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbzw(r0, L1);

    __ load_field_entry(c_rarg2, index);

    if (is_static) {
      __ mov(c_rarg1, zr); // null object reference
    } else {
      __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or null
    // c_rarg2: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r)
{
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
  const Register cache     = r2;
  const Register obj       = r4;
  const Register index     = r3;
  const Register tos_state = r3;
  const Register off       = r19;
  const Register flags     = r6;
  const Register bc        = r4; // uses same reg as obj, so don't mix them

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_access(cache, index, is_static, false);

  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  if (!is_static) {
    // obj is on the stack
    pop_and_check_object(obj);
  }

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  const Address field(obj, off);

  Label Done, notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

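  // The tos_state checks below dispatch in the order:
  //   btos, ztos, atos, itos, ctos, stos, ltos, ftos, dtos
  // (btos == 0 is handled by the initial cbnz).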
  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite getstatic, only getfield
  if (is_static) rc = may_not_rewrite;

  // btos
  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);
  // atos
  if (!Arguments::is_valhalla_enabled()) {
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ push(atos);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
    }
    __ b(Done);
  } else { // Valhalla
    if (is_static) {
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ push(atos);
      __ b(Done);
    } else {
      Label is_flat;
      __ test_field_is_flat(flags, noreg /* temp */, is_flat);
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
      }
      __ b(Done);
      __ bind(is_flat);
      // field is flat (null-free or nullable with a null-marker)
      __ mov(r0, obj);
      __ read_flat_field(cache, r0);
      __ verify_oop(r0);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_vgetfield, bc, r1);
      }
      __ b(Done);
    }
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);
  // stos
  __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);
  // ltos
  __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
  __ push(ltos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notFloat);
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif
  // dtos
  __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
  }
#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  Label notVolatile;
  __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  __ bind(notVolatile);
}


void TemplateTable::getfield(int byte_no)
{
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no)
{
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  transition(vtos, vtos);

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbz(r0, L1);

    __ mov(c_rarg2, cache);

    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ mov(c_rarg1, zr);
    } else {
      // Life is harder. The stack holds the value on top, followed by
      // the object. We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
      __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
      Label nope2, done, ok;
      __ ldr(c_rarg1, at_tos_p1());   // initially assume a one word jvalue
      __ cmpw(c_rarg3, ltos);
      __ br(Assembler::EQ, ok);
      __ cmpw(c_rarg3, dtos);
      __ br(Assembler::NE, nope2);
      __ bind(ok);
      __ ldr(c_rarg1, at_tos_p2());   // ltos (two word jvalue)
      __ bind(nope2);
    }
    // object (tos)
    __ mov(c_rarg3, esp);
    // c_rarg1: object pointer set up above (null if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache     = r2;
  const Register index     = r3;
  const Register tos_state = r3;
  const Register obj       = r2;
  const Register off       = r19;
  const Register flags     = r6;
  const Register bc        = r4;

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_mod(cache, index, is_static);
  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  Label Done;
  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  // field address
  const Address field(obj, off);

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite putstatic, only putfield
  if (is_static) rc = may_not_rewrite;

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);

  // atos
  {
    if (!Arguments::is_valhalla_enabled()) {
      __ pop(atos);
      if (!is_static) pop_and_check_object(obj);
      // Store into the field
      // Clobbers: r10, r11, r3
      do_oop_store(_masm, field, r0, IN_HEAP);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
      }
      __ b(Done);
    } else { // Valhalla
      __ pop(atos);
      if (is_static) {
        Label is_nullable;
        __ test_field_is_not_null_free_inline_type(flags, noreg /* temp */, is_nullable);
        __ null_check(r0);  // FIXME JDK-8341120
        __ bind(is_nullable);
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(Done);
      } else {
        Label null_free_reference, is_flat, rewrite_inline;
        __ test_field_is_flat(flags, noreg /* temp */, is_flat);
        __ test_field_is_null_free_inline_type(flags, noreg /* temp */, null_free_reference);
        pop_and_check_object(obj);
        // Store into the field
        // Clobbers: r10, r11, r3
        do_oop_store(_masm, field, r0, IN_HEAP);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
        // Implementation of the inline type semantic
        __ bind(null_free_reference);
        __ null_check(r0);  // FIXME JDK-8341120
        pop_and_check_object(obj);
        // Store into the field
        // Clobbers: r10, r11, r3
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(rewrite_inline);
        __ bind(is_flat);
        pop_and_check_object(r7);
        __ write_flat_field(cache, off, index, flags, r7);
        __ bind(rewrite_inline);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_vputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
      }
    } // Valhalla
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notFloat);
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
    }
  }

#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::putfield(int byte_no)
{
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

3200
3201 void TemplateTable::jvmti_post_fast_field_mod() {
3202 if (JvmtiExport::can_post_field_modification()) {
3203 // Check to see if a field modification watch has been set before
3204 // we take the time to call into the VM.
3205 Label L2;
3206 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3207 __ ldrw(c_rarg3, Address(rscratch1));
3208 __ cbzw(c_rarg3, L2);
3209 __ pop_ptr(r19); // copy the object pointer from tos
3210 __ verify_oop(r19);
3211 __ push_ptr(r19); // put the object pointer back on tos
3212 // Save tos values before call_VM() clobbers them. Since we have
3213 // to do it for every data type, we use the saved values as the
3214 // jvalue object.
3215 switch (bytecode()) { // load values into the jvalue object
3216 case Bytecodes::_fast_vputfield: // fall through
3217 case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3218 case Bytecodes::_fast_bputfield: // fall through
3219 case Bytecodes::_fast_zputfield: // fall through
3220 case Bytecodes::_fast_sputfield: // fall through
3221 case Bytecodes::_fast_cputfield: // fall through
3222 case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3223 case Bytecodes::_fast_dputfield: __ push_d(); break;
3224 case Bytecodes::_fast_fputfield: __ push_f(); break;
3225 case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3226
3227 default:
3228 ShouldNotReachHere();
3229 }
3230 __ mov(c_rarg3, esp); // points to jvalue on the stack
3231 // access constant pool cache entry
3232 __ load_field_entry(c_rarg2, r0);
3233 __ verify_oop(r19);
3234 // r19: object pointer copied above
3235 // c_rarg2: cache entry pointer
3236 // c_rarg3: jvalue object on the stack
3237 __ call_VM(noreg,
3238 CAST_FROM_FN_PTR(address,
3239 InterpreterRuntime::post_field_modification),
3240 r19, c_rarg2, c_rarg3);
3241
3242 switch (bytecode()) { // restore tos values
3243 case Bytecodes::_fast_vputfield: // fall through
3244 case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3245 case Bytecodes::_fast_bputfield: // fall through
3246 case Bytecodes::_fast_zputfield: // fall through
3247 case Bytecodes::_fast_sputfield: // fall through
3248 case Bytecodes::_fast_cputfield: // fall through
3249 case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3250 case Bytecodes::_fast_dputfield: __ pop_d(); break;
3251 case Bytecodes::_fast_fputfield: __ pop_f(); break;
3252 case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3253 default: break;
3254 }
3255 __ bind(L2);
3256 }
3257 }
3258
3259 void TemplateTable::fast_storefield(TosState state)
3260 {
3261 transition(state, vtos);
3262
3263 ByteSize base = ConstantPoolCache::base_offset();
3264
3265 jvmti_post_fast_field_mod();
3266
3267 // access constant pool cache
3268 __ load_field_entry(r2, r1);
3269
3270 // R1: field offset, R2: field holder, R5: flags
3271 load_resolved_field_entry(r2, r2, noreg, r1, r5);
3272 __ verify_field_offset(r1);
3273
3274 {
3275 Label notVolatile;
3276 __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3277 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3278 __ bind(notVolatile);
3279 }
3280
3281 Label notVolatile;
3282
3283 // Get object from stack
3284 pop_and_check_object(r2);
3285
3286 // field address
3287 const Address field(r2, r1);
3288
3289 // access field
3290 switch (bytecode()) {
3291 case Bytecodes::_fast_vputfield:
3292 {
3293 Label is_flat, done;
3294 __ test_field_is_flat(r5, noreg /* temp */, is_flat);
3295 __ null_check(r0);
3296 do_oop_store(_masm, field, r0, IN_HEAP);
3297 __ b(done);
3298 __ bind(is_flat);
3299 __ load_field_entry(r4, r5);
3300 // Re-shuffle registers because of VM calls calling convention
3301 __ mov(r19, r1);
3302 __ mov(r7, r2);
3303 __ write_flat_field(r4, r19, r6, r8, r7);
3304 __ bind(done);
3305 }
3306 break;
3307 case Bytecodes::_fast_aputfield:
3308 // Clobbers: r10, r11, r3
3309 do_oop_store(_masm, field, r0, IN_HEAP);
3310 break;
3311 case Bytecodes::_fast_lputfield:
3312 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3313 break;
3314 case Bytecodes::_fast_iputfield:
3315 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3316 break;
3317 case Bytecodes::_fast_zputfield:
3318 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3319 break;
3320 case Bytecodes::_fast_bputfield:
3321 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3322 break;
3323 case Bytecodes::_fast_sputfield:
3324 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3325 break;
3326 case Bytecodes::_fast_cputfield:
3327 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3328 break;
3329 case Bytecodes::_fast_fputfield:
3330 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3331 break;
3332 case Bytecodes::_fast_dputfield:
3333 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3334 break;
3335 default:
3336 ShouldNotReachHere();
3337 }
3338
3339 {
3340 Label notVolatile;
3341 __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3342 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3343 __ bind(notVolatile);
3344 }
3345 }


void TemplateTable::fast_accessfield(TosState state)
{
  transition(atos, state);
  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r2, Address(rscratch1));
    __ cbzw(r2, L1);
    // access constant pool cache entry
    __ load_field_entry(c_rarg2, rscratch2);
    __ verify_oop(r0);
    __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
    __ mov(c_rarg1, r0);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(r0);  // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ load_field_entry(r2, r1);

  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
  __ verify_field_offset(r1);

  __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));

  // r0: object
  __ verify_oop(r0);
  __ null_check(r0);
  const Address field(r0, r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_vgetfield:
    {
      // field is flat
      __ read_flat_field(r2, r0);
      __ verify_oop(r0);
    }
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case Bytecodes::_fast_lgetfield:
    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  case Bytecodes::_fast_dgetfield:
    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }
  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::fast_xaccess(TosState state)
{
  transition(vtos, state);

  // get receiver
  __ ldr(r0, aaddress(0));
  // access constant pool cache
  __ load_field_entry(r2, r3, 2);

  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
  __ verify_field_offset(r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(r0);
  switch (state) {
  case itos:
    __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  case atos:
    do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case ftos:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  __ decrement(rbcp);
}



//-----------------------------------------------------------------------------
// Calls

void TemplateTable::prepare_invoke(Register cache, Register recv) {

  Bytecodes::Code code = bytecode();
  const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);

  // save 'interpreter return address'
  __ save_bcp();

  // Load TOS state for later
  __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));

  // load receiver if needed (note: no return address pushed yet)
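  // The receiver is the deepest argument on the expression stack, at
  // esp + (num_parameters - 1) * wordSize; hence the scaled add and the
  // -expr_offset_in_bytes(1) below.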
  if (load_receiver) {
    __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ add(rscratch1, esp, recv, ext::uxtx, 3);
    __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
    __ verify_oop(recv);
  }

  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
  }
}


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags)
{
  // Uses temporary registers r0, r3
  assert_different_registers(index, recv, r0, r3);
  // Test for an invoke of a final method
  Label notFinal;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);

  const Register method = index;  // method must be rmethod
  assert(method == rmethod,
         "Method must be rmethod for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(r0);
  __ profile_arguments_type(r0, method, r4, true);

  __ jump_from_interpreted(method, r0);

  __ bind(notFinal);

  // get receiver klass
  __ load_klass(r0, recv);

  // profile this call
  __ profile_virtual_call(r0, rlocals);

  // get target Method & entry point
  __ lookup_virtual_method(r0, index, method);
  __ profile_arguments_type(r3, method, r4, true);
  // FIXME -- this looks completely redundant. is it?
  // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
  __ jump_from_interpreted(method, r3);
}

void TemplateTable::invokevirtual(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_resolved_method_entry_virtual(r2,      // ResolvedMethodEntry*
                                     rmethod, // Method* or vtable index
                                     r3);     // flags
  prepare_invoke(r2, r2); // recv

  // rmethod: index (actually a Method*)
  // r2: receiver
  // r3: flags

  invokevirtual_helper(rmethod, r2, r3);
}

void TemplateTable::invokespecial(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2);  // get receiver also for null check
  __ verify_oop(r2);
  __ null_check(r2);
  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, rbcp, false);
  __ jump_from_interpreted(rmethod, r0);
}
3609
3610 void TemplateTable::invokestatic(int byte_no)
3611 {
3612 transition(vtos, vtos);
3613 assert(byte_no == f1_byte, "use this argument");
3614
3615 load_resolved_method_entry_special_or_static(r2, // ResolvedMethodEntry*
3616 rmethod, // Method*
3617 r3); // flags
3618 prepare_invoke(r2, r2); // get receiver also for null check
3619
3620 // do the call
3621 __ profile_call(r0);
3622 __ profile_arguments_type(r0, rmethod, r4, false);
3623 __ jump_from_interpreted(rmethod, r0);
3624 }
3625
3626 void TemplateTable::fast_invokevfinal(int byte_no)
3627 {
3628 __ call_Unimplemented();
3629 }
3630
3631 void TemplateTable::invokeinterface(int byte_no) {
3632 transition(vtos, vtos);
3633 assert(byte_no == f1_byte, "use this argument");
3634
3635 load_resolved_method_entry_interface(r2, // ResolvedMethodEntry*
3636 r0, // Klass*
3637 rmethod, // Method* or itable/vtable index
3638 r3); // flags
3639 prepare_invoke(r2, r2); // receiver
3640
3641 // r0: interface klass (from f1)
3642 // rmethod: method (from f2)
3643 // r2: receiver
3644 // r3: flags
3645
3646 // First check for Object case, then private interface method,
3647 // then regular interface method.
3648
3649 // Special case of invokeinterface called for virtual method of
3650 // java.lang.Object. See cpCache.cpp for details.
3651 Label notObjectMethod;
3652 __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);
3653
3654 invokevirtual_helper(rmethod, r2, r3);
3655 __ bind(notObjectMethod);
3656
3657 Label no_such_interface;
3658
3659 // Check for private method invocation - indicated by vfinal
3660 Label notVFinal;
3661 __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);
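  // A private interface method is invoked directly (like a final method),
  // but the receiver must still implement the declaring interface (in r0).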

  // Get receiver klass into r3
  __ load_klass(r3, r2);

  Label subtype;
  __ check_klass_subtype(r3, r0, r4, subtype);
  // If we get here the typecheck failed
  __ b(no_such_interface);
  __ bind(subtype);

  __ profile_final_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, true);
  __ jump_from_interpreted(rmethod, r0);

  __ bind(notVFinal);

  // Get receiver klass into r3
  __ load_klass(r3, r2);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(r16, rmethod);
  // Receiver subtype check against REFC.
  // Superklass in r0. Subklass in r3. Blows rscratch2, r13
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r3, r0, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rscratch2, r13,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ profile_virtual_call(r3, r13);

  // Get declaring interface class from method, and itable index

  __ load_method_holder(r0, rmethod);
  __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
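  // Method::itable_index() is encoded as (itable_index_max - index);
  // the sub/neg pair below recovers the actual itable index.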
  __ subw(rmethod, rmethod, Method::itable_index_max);
  __ negw(rmethod, rmethod);

  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, r3);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, r0, rmethod,
                             // outputs: method, scan temp. reg
                             rmethod, r13,
                             no_such_interface);

  // rmethod: Method to call
  // r2: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ cbz(rmethod, no_such_method);

  __ profile_arguments_type(r3, rmethod, r13, true);

  // do the call
  // r2: receiver
  // rmethod: Method
  __ jump_from_interpreted(rmethod, r3);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_handle(r2,      // ResolvedMethodEntry*
                                    rmethod, // Method*
                                    r0,      // Resolved reference
                                    r3);     // flags
  prepare_invoke(r2, r2);

  __ verify_method_ptr(r2);
  __ verify_oop(r2);
  __ null_check(r2);

  // FIXME: profile the LambdaForm also

  // r13 is safe to use here as a scratch reg because it is about to
  // be clobbered by jump_from_interpreted().
  __ profile_final_call(r13);
  __ profile_arguments_type(r13, rmethod, r4, true);

  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_invokedynamic_entry(rmethod);

  // r0: CallSite object (from cpool->resolved_references[])
  // rmethod: MH.linkToCallSite method

  // Note: r0_callsite is already pushed

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(r3, rmethod, r13, false);

  __ verify_oop(r0);

  __ jump_from_interpreted(rmethod, r0);
}


//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  __ get_unsigned_2_byte_index_at_bcp(r3, 1);
  Label slow_case;
  Label done;
  Label initialize_header;

  __ get_cpool_and_tags(r4, r0);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
  __ lea(rscratch1, Address(rscratch1, tags_offset));
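  // Load-acquire the tag: if it reads as resolved, the klass entry
  // published by ConstantPool::klass_at_put is visible to the load below.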
  __ ldarb(rscratch1, rscratch1);
  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

  // make sure klass is initialized
  assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
  __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);

  __ allocate_instance(r4, r0, r3, r1, true, slow_case);
  __ b(done);

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ call_VM_preemptable(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast()
{
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_metadata(r0, rthread);
  __ pop(r3); // restore receiver
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check.  Blows r2, r5.  Object in r3.
  // Superklass in r0.  Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object (saved in r3) back into r0 as the result

  __ b(done);
  __ bind(is_null);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ profile_null_seen(r2);
  }

  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_metadata(r0, rthread);
  __ pop(r3); // restore receiver
  __ verify_oop(r3);
  __ load_klass(r3, r3);
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);

  // Generate subtype check.  Blows r2, r5.
  // Superklass in r0.  Subklass in r3.
  __ gen_subtype_check(r3, ok_is_subtype);

  // Come here on failure
  __ mov(r0, 0);
  __ b(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, 1);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
  // r0 = 0: obj == nullptr or  obj is not an instanceof the specified klass
  // r0 = 1: obj != nullptr and obj is     an instanceof the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, rbcp);
  __ mov(r19, r0);

  // post the breakpoint event
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rmethod, rbcp);

  // complete the execution of original bytecode
  __ mov(rscratch1, r19);
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(r0);
  __ b(Interpreter::throw_exception_entry());
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the structure of the generated assembly as well
//
// Stack layout:
//
// [expressions  ] <--- esp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rfp    ] <--- rfp
void TemplateTable::monitorenter()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

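  // Inline-type objects have no identity and cannot be used as monitors;
  // if the mark word carries the inline-type pattern, throw (see below).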
  Label is_inline_type;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ test_markword_is_inline_type(rscratch1, is_inline_type);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label allocated;

  // initialize entry pointer
  __ mov(c_rarg1, zr); // points to free slot or null

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
    __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg3 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom

    __ b(entry);

    __ bind(loop);
    // check if current entry is used
    // if not used then remember entry in c_rarg1
    __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
    __ cmp(zr, rscratch1);
    __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
    // check if current entry is for same object
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, exit);
    // otherwise advance to next entry
    __ add(c_rarg3, c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
    __ bind(exit);
  }

  __ cbnz(c_rarg1, allocated); // check if a slot has been found and
                               // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers            // esp: old expression stack top

    __ check_extended_sp();
    __ sub(sp, sp, entry_size);           // make room for the monitor
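    // Record the enlarged native SP in the frame's extended_sp slot,
    // stored like the other frame offsets: word-scaled, relative to rfp.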
    __ sub(rscratch1, sp, rfp);
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));

    __ ldr(c_rarg1, monitor_block_bot); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to the old expression stack bottom

    __ sub(esp, esp, entry_size);         // move expression stack top
    __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
    __ mov(c_rarg3, esp);                 // set start value for copy loop
    __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, monitor_block_bot); // set new monitor block bottom

    __ b(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                   // word from old location
    __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
    __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
    __ bind(entry);
    __ cmp(c_rarg3, c_rarg1);   // check if bottom reached
    __ br(Assembler::NE, loop); // if not at bottom then
                                // copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so that exception
  // handling for asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);

  __ bind(is_inline_type);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_identity_exception), r0);
  __ should_not_reach_here();
}


void TemplateTable::monitorexit()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

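  // Inline-type objects can never have been locked: if the mark word
  // carries the inline-type pattern, throw IllegalMonitorStateException.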
  const int is_inline_type_mask = markWord::inline_type_pattern;
  Label has_identity;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ mov(rscratch2, is_inline_type_mask);
  __ andr(rscratch1, rscratch1, rscratch2);
  __ cmp(rscratch1, rscratch2);
  __ br(Assembler::NE, has_identity);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();
  __ bind(has_identity);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block
    __ b(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, found);
    // otherwise advance to next entry
    __ add(c_rarg1, c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
  }

  // Error handling: the search fell through, so the object is not in the
  // monitor block and unlocking was not block-structured.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(r0); // discard object
}


// Wide instructions
void TemplateTable::wide()
{
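  // Fetch the opcode that follows the wide prefix and dispatch through
  // the table of wide-variant entry points (one pointer-sized entry each).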
  __ load_unsigned_byte(r19, at_bcp(1));
  __ mov(rscratch1, (address)Interpreter::_wentry_point);
  __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
  __ br(rscratch1);
}


// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
  __ sub(c_rarg1, c_rarg1, wordSize);
  call_VM(r0,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
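  // Pop the ndims dimension words: reload the count from the bytecode
  // (r0 now holds the new array) and step esp past them.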
  __ load_unsigned_byte(r1, at_bcp(3));
  __ lea(esp, Address(esp, r1, Address::uxtw(3)));
}