1 /*
2 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSetAssembler.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "interpreter/interp_masm.hpp"
34 #include "interpreter/templateTable.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/methodData.hpp"
37 #include "oops/method.hpp"
38 #include "oops/objArrayKlass.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "prims/jvmtiExport.hpp"
41 #include "prims/methodHandles.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "runtime/synchronizer.hpp"
46 #include "utilities/powerOfTwo.hpp"
47
48 #define __ _masm->
49
50 // Address computation: local variables
51
52 static inline Address iaddress(int n) {
53 return Address(rlocals, Interpreter::local_offset_in_bytes(n));
54 }
55
56 static inline Address laddress(int n) {
57 return iaddress(n + 1);
58 }
59
60 static inline Address faddress(int n) {
61 return iaddress(n);
62 }
63
64 static inline Address daddress(int n) {
65 return laddress(n);
66 }
67
68 static inline Address aaddress(int n) {
69 return iaddress(n);
70 }
71
72 static inline Address iaddress(Register r) {
73 return Address(rlocals, r, Address::lsl(3));
74 }
75
76 static inline Address laddress(Register r, Register scratch,
77 InterpreterMacroAssembler* _masm) {
78 __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
79 return Address(scratch, Interpreter::local_offset_in_bytes(1));
80 }
81
82 static inline Address faddress(Register r) {
83 return iaddress(r);
84 }
85
86 static inline Address daddress(Register r, Register scratch,
87 InterpreterMacroAssembler* _masm) {
88 return laddress(r, scratch, _masm);
89 }
90
91 static inline Address aaddress(Register r) {
92 return iaddress(r);
93 }
94
95 static inline Address at_rsp() {
96 return Address(esp, 0);
97 }
98
99 // At top of Java expression stack which may be different than esp(). It
100 // isn't for category 1 objects.
101 static inline Address at_tos () {
102 return Address(esp, Interpreter::expr_offset_in_bytes(0));
103 }
104
105 static inline Address at_tos_p1() {
106 return Address(esp, Interpreter::expr_offset_in_bytes(1));
107 }
108
109 static inline Address at_tos_p2() {
110 return Address(esp, Interpreter::expr_offset_in_bytes(2));
111 }
112
113 static inline Address at_tos_p3() {
114 return Address(esp, Interpreter::expr_offset_in_bytes(3));
115 }
116
117 static inline Address at_tos_p4() {
118 return Address(esp, Interpreter::expr_offset_in_bytes(4));
119 }
120
121 static inline Address at_tos_p5() {
122 return Address(esp, Interpreter::expr_offset_in_bytes(5));
123 }
124
125 // Condition conversion
126 static Assembler::Condition j_not(TemplateTable::Condition cc) {
127 switch (cc) {
128 case TemplateTable::equal : return Assembler::NE;
129 case TemplateTable::not_equal : return Assembler::EQ;
130 case TemplateTable::less : return Assembler::GE;
131 case TemplateTable::less_equal : return Assembler::GT;
132 case TemplateTable::greater : return Assembler::LE;
133 case TemplateTable::greater_equal: return Assembler::LT;
134 }
135 ShouldNotReachHere();
136 return Assembler::EQ;
137 }
138
139
140 // Miscelaneous helper routines
141 // Store an oop (or NULL) at the Address described by obj.
142 // If val == noreg this means store a NULL
143 static void do_oop_store(InterpreterMacroAssembler* _masm,
144 Address dst,
145 Register val,
146 DecoratorSet decorators) {
147 assert(val == noreg || val == r0, "parameter is just for looks");
148 __ store_heap_oop(dst, val, r10, r1, decorators);
149 }
150
151 static void do_oop_load(InterpreterMacroAssembler* _masm,
152 Address src,
153 Register dst,
154 DecoratorSet decorators) {
155 __ load_heap_oop(dst, src, r10, r1, decorators);
156 }
157
158 Address TemplateTable::at_bcp(int offset) {
159 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
160 return Address(rbcp, offset);
161 }
162
163 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
164 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
165 int byte_no)
166 {
167 if (!RewriteBytecodes) return;
168 Label L_patch_done;
169
170 switch (bc) {
171 case Bytecodes::_fast_aputfield:
172 case Bytecodes::_fast_bputfield:
173 case Bytecodes::_fast_zputfield:
174 case Bytecodes::_fast_cputfield:
175 case Bytecodes::_fast_dputfield:
176 case Bytecodes::_fast_fputfield:
177 case Bytecodes::_fast_iputfield:
178 case Bytecodes::_fast_lputfield:
179 case Bytecodes::_fast_sputfield:
180 {
181 // We skip bytecode quickening for putfield instructions when
182 // the put_code written to the constant pool cache is zero.
183 // This is required so that every execution of this instruction
184 // calls out to InterpreterRuntime::resolve_get_put to do
185 // additional, required work.
186 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
187 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
188 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
189 __ movw(bc_reg, bc);
190 __ cbzw(temp_reg, L_patch_done); // don't patch
191 }
192 break;
193 default:
194 assert(byte_no == -1, "sanity");
195 // the pair bytecodes have already done the load.
196 if (load_bc_into_bc_reg) {
197 __ movw(bc_reg, bc);
198 }
199 }
200
201 if (JvmtiExport::can_post_breakpoint()) {
202 Label L_fast_patch;
203 // if a breakpoint is present we can't rewrite the stream directly
204 __ load_unsigned_byte(temp_reg, at_bcp(0));
205 __ cmpw(temp_reg, Bytecodes::_breakpoint);
206 __ br(Assembler::NE, L_fast_patch);
207 // Let breakpoint table handling rewrite to quicker bytecode
208 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
209 __ b(L_patch_done);
210 __ bind(L_fast_patch);
211 }
212
213 #ifdef ASSERT
214 Label L_okay;
215 __ load_unsigned_byte(temp_reg, at_bcp(0));
216 __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
217 __ br(Assembler::EQ, L_okay);
218 __ cmpw(temp_reg, bc_reg);
219 __ br(Assembler::EQ, L_okay);
220 __ stop("patching the wrong bytecode");
221 __ bind(L_okay);
222 #endif
223
224 // patch bytecode
225 __ strb(bc_reg, at_bcp(0));
226 __ bind(L_patch_done);
227 }
228
229
230 // Individual instructions
231
232 void TemplateTable::nop() {
233 transition(vtos, vtos);
234 // nothing to do
235 }
236
237 void TemplateTable::shouldnotreachhere() {
238 transition(vtos, vtos);
239 __ stop("shouldnotreachhere bytecode");
240 }
241
242 void TemplateTable::aconst_null()
243 {
244 transition(vtos, atos);
245 __ mov(r0, 0);
246 }
247
248 void TemplateTable::iconst(int value)
249 {
250 transition(vtos, itos);
251 __ mov(r0, value);
252 }
253
254 void TemplateTable::lconst(int value)
255 {
256 __ mov(r0, value);
257 }
258
259 void TemplateTable::fconst(int value)
260 {
261 transition(vtos, ftos);
262 switch (value) {
263 case 0:
264 __ fmovs(v0, 0.0);
265 break;
266 case 1:
267 __ fmovs(v0, 1.0);
268 break;
269 case 2:
270 __ fmovs(v0, 2.0);
271 break;
272 default:
273 ShouldNotReachHere();
274 break;
275 }
276 }
277
278 void TemplateTable::dconst(int value)
279 {
280 transition(vtos, dtos);
281 switch (value) {
282 case 0:
283 __ fmovd(v0, 0.0);
284 break;
285 case 1:
286 __ fmovd(v0, 1.0);
287 break;
288 case 2:
289 __ fmovd(v0, 2.0);
290 break;
291 default:
292 ShouldNotReachHere();
293 break;
294 }
295 }
296
297 void TemplateTable::bipush()
298 {
299 transition(vtos, itos);
300 __ load_signed_byte32(r0, at_bcp(1));
301 }
302
303 void TemplateTable::sipush()
304 {
305 transition(vtos, itos);
306 __ load_unsigned_short(r0, at_bcp(1));
307 __ revw(r0, r0);
308 __ asrw(r0, r0, 16);
309 }
310
311 void TemplateTable::ldc(bool wide)
312 {
313 transition(vtos, vtos);
314 Label call_ldc, notFloat, notClass, notInt, Done;
315
316 if (wide) {
317 __ get_unsigned_2_byte_index_at_bcp(r1, 1);
318 } else {
319 __ load_unsigned_byte(r1, at_bcp(1));
320 }
321 __ get_cpool_and_tags(r2, r0);
322
323 const int base_offset = ConstantPool::header_size() * wordSize;
324 const int tags_offset = Array<u1>::base_offset_in_bytes();
325
326 // get type
327 __ add(r3, r1, tags_offset);
328 __ lea(r3, Address(r0, r3));
329 __ ldarb(r3, r3);
330
331 // unresolved class - get the resolved class
332 __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
333 __ br(Assembler::EQ, call_ldc);
334
335 // unresolved class in error state - call into runtime to throw the error
336 // from the first resolution attempt
337 __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
338 __ br(Assembler::EQ, call_ldc);
339
340 // resolved class - need to call vm to get java mirror of the class
341 __ cmp(r3, (u1)JVM_CONSTANT_Class);
342 __ br(Assembler::NE, notClass);
343
344 __ bind(call_ldc);
345 __ mov(c_rarg1, wide);
346 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
347 __ push_ptr(r0);
348 __ verify_oop(r0);
349 __ b(Done);
350
351 __ bind(notClass);
352 __ cmp(r3, (u1)JVM_CONSTANT_Float);
353 __ br(Assembler::NE, notFloat);
354 // ftos
355 __ adds(r1, r2, r1, Assembler::LSL, 3);
356 __ ldrs(v0, Address(r1, base_offset));
357 __ push_f();
358 __ b(Done);
359
360 __ bind(notFloat);
361
362 __ cmp(r3, (u1)JVM_CONSTANT_Integer);
363 __ br(Assembler::NE, notInt);
364
365 // itos
366 __ adds(r1, r2, r1, Assembler::LSL, 3);
367 __ ldrw(r0, Address(r1, base_offset));
368 __ push_i(r0);
369 __ b(Done);
370
371 __ bind(notInt);
372 condy_helper(Done);
373
374 __ bind(Done);
375 }
376
377 // Fast path for caching oop constants.
378 void TemplateTable::fast_aldc(bool wide)
379 {
380 transition(vtos, atos);
381
382 Register result = r0;
383 Register tmp = r1;
384 Register rarg = r2;
385
386 int index_size = wide ? sizeof(u2) : sizeof(u1);
387
388 Label resolved;
389
390 // We are resolved if the resolved reference cache entry contains a
391 // non-null object (String, MethodType, etc.)
392 assert_different_registers(result, tmp);
393 __ get_cache_index_at_bcp(tmp, 1, index_size);
394 __ load_resolved_reference_at_index(result, tmp);
395 __ cbnz(result, resolved);
396
397 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
398
399 // first time invocation - must resolve first
400 __ mov(rarg, (int)bytecode());
401 __ call_VM(result, entry, rarg);
402
403 __ bind(resolved);
404
405 { // Check for the null sentinel.
406 // If we just called the VM, it already did the mapping for us,
407 // but it's harmless to retry.
408 Label notNull;
409
410 // Stash null_sentinel address to get its value later
411 __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
412 __ ldr(tmp, Address(rarg));
413 __ resolve_oop_handle(tmp);
414 __ cmpoop(result, tmp);
415 __ br(Assembler::NE, notNull);
416 __ mov(result, 0); // NULL object reference
417 __ bind(notNull);
418 }
419
420 if (VerifyOops) {
421 // Safe to call with 0 result
422 __ verify_oop(result);
423 }
424 }
425
426 void TemplateTable::ldc2_w()
427 {
428 transition(vtos, vtos);
429 Label notDouble, notLong, Done;
430 __ get_unsigned_2_byte_index_at_bcp(r0, 1);
431
432 __ get_cpool_and_tags(r1, r2);
433 const int base_offset = ConstantPool::header_size() * wordSize;
434 const int tags_offset = Array<u1>::base_offset_in_bytes();
435
436 // get type
437 __ lea(r2, Address(r2, r0, Address::lsl(0)));
438 __ load_unsigned_byte(r2, Address(r2, tags_offset));
439 __ cmpw(r2, (int)JVM_CONSTANT_Double);
440 __ br(Assembler::NE, notDouble);
441
442 // dtos
443 __ lea (r2, Address(r1, r0, Address::lsl(3)));
444 __ ldrd(v0, Address(r2, base_offset));
445 __ push_d();
446 __ b(Done);
447
448 __ bind(notDouble);
449 __ cmpw(r2, (int)JVM_CONSTANT_Long);
450 __ br(Assembler::NE, notLong);
451
452 // ltos
453 __ lea(r0, Address(r1, r0, Address::lsl(3)));
454 __ ldr(r0, Address(r0, base_offset));
455 __ push_l();
456 __ b(Done);
457
458 __ bind(notLong);
459 condy_helper(Done);
460
461 __ bind(Done);
462 }
463
464 void TemplateTable::condy_helper(Label& Done)
465 {
466 Register obj = r0;
467 Register rarg = r1;
468 Register flags = r2;
469 Register off = r3;
470
471 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
472
473 __ mov(rarg, (int) bytecode());
474 __ call_VM(obj, entry, rarg);
475
476 __ get_vm_result_2(flags, rthread);
477
478 // VMr = obj = base address to find primitive value to push
479 // VMr2 = flags = (tos, off) using format of CPCE::_flags
480 __ mov(off, flags);
481 __ andw(off, off, ConstantPoolCacheEntry::field_index_mask);
482
483 const Address field(obj, off);
484
485 // What sort of thing are we loading?
486 // x86 uses a shift and mask or wings it with a shift plus assert
487 // the mask is not needed. aarch64 just uses bitfield extract
488 __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
489 ConstantPoolCacheEntry::tos_state_bits);
490
491 switch (bytecode()) {
492 case Bytecodes::_ldc:
493 case Bytecodes::_ldc_w:
494 {
495 // tos in (itos, ftos, stos, btos, ctos, ztos)
496 Label notInt, notFloat, notShort, notByte, notChar, notBool;
497 __ cmpw(flags, itos);
498 __ br(Assembler::NE, notInt);
499 // itos
500 __ ldrw(r0, field);
501 __ push(itos);
502 __ b(Done);
503
504 __ bind(notInt);
505 __ cmpw(flags, ftos);
506 __ br(Assembler::NE, notFloat);
507 // ftos
508 __ load_float(field);
509 __ push(ftos);
510 __ b(Done);
511
512 __ bind(notFloat);
513 __ cmpw(flags, stos);
514 __ br(Assembler::NE, notShort);
515 // stos
516 __ load_signed_short(r0, field);
517 __ push(stos);
518 __ b(Done);
519
520 __ bind(notShort);
521 __ cmpw(flags, btos);
522 __ br(Assembler::NE, notByte);
523 // btos
524 __ load_signed_byte(r0, field);
525 __ push(btos);
526 __ b(Done);
527
528 __ bind(notByte);
529 __ cmpw(flags, ctos);
530 __ br(Assembler::NE, notChar);
531 // ctos
532 __ load_unsigned_short(r0, field);
533 __ push(ctos);
534 __ b(Done);
535
536 __ bind(notChar);
537 __ cmpw(flags, ztos);
538 __ br(Assembler::NE, notBool);
539 // ztos
540 __ load_signed_byte(r0, field);
541 __ push(ztos);
542 __ b(Done);
543
544 __ bind(notBool);
545 break;
546 }
547
548 case Bytecodes::_ldc2_w:
549 {
550 Label notLong, notDouble;
551 __ cmpw(flags, ltos);
552 __ br(Assembler::NE, notLong);
553 // ltos
554 __ ldr(r0, field);
555 __ push(ltos);
556 __ b(Done);
557
558 __ bind(notLong);
559 __ cmpw(flags, dtos);
560 __ br(Assembler::NE, notDouble);
561 // dtos
562 __ load_double(field);
563 __ push(dtos);
564 __ b(Done);
565
566 __ bind(notDouble);
567 break;
568 }
569
570 default:
571 ShouldNotReachHere();
572 }
573
574 __ stop("bad ldc/condy");
575 }
576
577 void TemplateTable::locals_index(Register reg, int offset)
578 {
579 __ ldrb(reg, at_bcp(offset));
580 __ neg(reg, reg);
581 }
582
583 void TemplateTable::iload() {
584 iload_internal();
585 }
586
587 void TemplateTable::nofast_iload() {
588 iload_internal(may_not_rewrite);
589 }
590
591 void TemplateTable::iload_internal(RewriteControl rc) {
592 transition(vtos, itos);
593 if (RewriteFrequentPairs && rc == may_rewrite) {
594 Label rewrite, done;
595 Register bc = r4;
596
597 // get next bytecode
598 __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
599
600 // if _iload, wait to rewrite to iload2. We only want to rewrite the
601 // last two iloads in a pair. Comparing against fast_iload means that
602 // the next bytecode is neither an iload or a caload, and therefore
603 // an iload pair.
604 __ cmpw(r1, Bytecodes::_iload);
605 __ br(Assembler::EQ, done);
606
607 // if _fast_iload rewrite to _fast_iload2
608 __ cmpw(r1, Bytecodes::_fast_iload);
609 __ movw(bc, Bytecodes::_fast_iload2);
610 __ br(Assembler::EQ, rewrite);
611
612 // if _caload rewrite to _fast_icaload
613 __ cmpw(r1, Bytecodes::_caload);
614 __ movw(bc, Bytecodes::_fast_icaload);
615 __ br(Assembler::EQ, rewrite);
616
617 // else rewrite to _fast_iload
618 __ movw(bc, Bytecodes::_fast_iload);
619
620 // rewrite
621 // bc: new bytecode
622 __ bind(rewrite);
623 patch_bytecode(Bytecodes::_iload, bc, r1, false);
624 __ bind(done);
625
626 }
627
628 // do iload, get the local value into tos
629 locals_index(r1);
630 __ ldr(r0, iaddress(r1));
631
632 }
633
634 void TemplateTable::fast_iload2()
635 {
636 transition(vtos, itos);
637 locals_index(r1);
638 __ ldr(r0, iaddress(r1));
639 __ push(itos);
640 locals_index(r1, 3);
641 __ ldr(r0, iaddress(r1));
642 }
643
644 void TemplateTable::fast_iload()
645 {
646 transition(vtos, itos);
647 locals_index(r1);
648 __ ldr(r0, iaddress(r1));
649 }
650
651 void TemplateTable::lload()
652 {
653 transition(vtos, ltos);
654 __ ldrb(r1, at_bcp(1));
655 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
656 __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
657 }
658
659 void TemplateTable::fload()
660 {
661 transition(vtos, ftos);
662 locals_index(r1);
663 // n.b. we use ldrd here because this is a 64 bit slot
664 // this is comparable to the iload case
665 __ ldrd(v0, faddress(r1));
666 }
667
668 void TemplateTable::dload()
669 {
670 transition(vtos, dtos);
671 __ ldrb(r1, at_bcp(1));
672 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
673 __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
674 }
675
676 void TemplateTable::aload()
677 {
678 transition(vtos, atos);
679 locals_index(r1);
680 __ ldr(r0, iaddress(r1));
681 }
682
683 void TemplateTable::locals_index_wide(Register reg) {
684 __ ldrh(reg, at_bcp(2));
685 __ rev16w(reg, reg);
686 __ neg(reg, reg);
687 }
688
689 void TemplateTable::wide_iload() {
690 transition(vtos, itos);
691 locals_index_wide(r1);
692 __ ldr(r0, iaddress(r1));
693 }
694
695 void TemplateTable::wide_lload()
696 {
697 transition(vtos, ltos);
698 __ ldrh(r1, at_bcp(2));
699 __ rev16w(r1, r1);
700 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
701 __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
702 }
703
704 void TemplateTable::wide_fload()
705 {
706 transition(vtos, ftos);
707 locals_index_wide(r1);
708 // n.b. we use ldrd here because this is a 64 bit slot
709 // this is comparable to the iload case
710 __ ldrd(v0, faddress(r1));
711 }
712
713 void TemplateTable::wide_dload()
714 {
715 transition(vtos, dtos);
716 __ ldrh(r1, at_bcp(2));
717 __ rev16w(r1, r1);
718 __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
719 __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
720 }
721
722 void TemplateTable::wide_aload()
723 {
724 transition(vtos, atos);
725 locals_index_wide(r1);
726 __ ldr(r0, aaddress(r1));
727 }
728
729 void TemplateTable::index_check(Register array, Register index)
730 {
731 // destroys r1, rscratch1
732 // check array
733 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
734 // sign extend index for use by indexed load
735 // __ movl2ptr(index, index);
736 // check index
737 Register length = rscratch1;
738 __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
739 __ cmpw(index, length);
740 if (index != r1) {
741 // ??? convention: move aberrant index into r1 for exception message
742 assert(r1 != array, "different registers");
743 __ mov(r1, index);
744 }
745 Label ok;
746 __ br(Assembler::LO, ok);
747 // ??? convention: move array into r3 for exception message
748 __ mov(r3, array);
749 __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
750 __ br(rscratch1);
751 __ bind(ok);
752 }
753
754 void TemplateTable::iaload()
755 {
756 transition(itos, itos);
757 __ mov(r1, r0);
758 __ pop_ptr(r0);
759 // r0: array
760 // r1: index
761 index_check(r0, r1); // leaves index in r1, kills rscratch1
762 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
763 __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
764 }
765
766 void TemplateTable::laload()
767 {
768 transition(itos, ltos);
769 __ mov(r1, r0);
770 __ pop_ptr(r0);
771 // r0: array
772 // r1: index
773 index_check(r0, r1); // leaves index in r1, kills rscratch1
774 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
775 __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
776 }
777
778 void TemplateTable::faload()
779 {
780 transition(itos, ftos);
781 __ mov(r1, r0);
782 __ pop_ptr(r0);
783 // r0: array
784 // r1: index
785 index_check(r0, r1); // leaves index in r1, kills rscratch1
786 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
787 __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
788 }
789
790 void TemplateTable::daload()
791 {
792 transition(itos, dtos);
793 __ mov(r1, r0);
794 __ pop_ptr(r0);
795 // r0: array
796 // r1: index
797 index_check(r0, r1); // leaves index in r1, kills rscratch1
798 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
799 __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
800 }
801
802 void TemplateTable::aaload()
803 {
804 transition(itos, atos);
805 __ mov(r1, r0);
806 __ pop_ptr(r0);
807 // r0: array
808 // r1: index
809 index_check(r0, r1); // leaves index in r1, kills rscratch1
810 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
811 do_oop_load(_masm,
812 Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
813 r0,
814 IS_ARRAY);
815 }
816
817 void TemplateTable::baload()
818 {
819 transition(itos, itos);
820 __ mov(r1, r0);
821 __ pop_ptr(r0);
822 // r0: array
823 // r1: index
824 index_check(r0, r1); // leaves index in r1, kills rscratch1
825 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
826 __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
827 }
828
829 void TemplateTable::caload()
830 {
831 transition(itos, itos);
832 __ mov(r1, r0);
833 __ pop_ptr(r0);
834 // r0: array
835 // r1: index
836 index_check(r0, r1); // leaves index in r1, kills rscratch1
837 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
838 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
839 }
840
841 // iload followed by caload frequent pair
842 void TemplateTable::fast_icaload()
843 {
844 transition(vtos, itos);
845 // load index out of locals
846 locals_index(r2);
847 __ ldr(r1, iaddress(r2));
848
849 __ pop_ptr(r0);
850
851 // r0: array
852 // r1: index
853 index_check(r0, r1); // leaves index in r1, kills rscratch1
854 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
855 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
856 }
857
858 void TemplateTable::saload()
859 {
860 transition(itos, itos);
861 __ mov(r1, r0);
862 __ pop_ptr(r0);
863 // r0: array
864 // r1: index
865 index_check(r0, r1); // leaves index in r1, kills rscratch1
866 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
867 __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
868 }
869
870 void TemplateTable::iload(int n)
871 {
872 transition(vtos, itos);
873 __ ldr(r0, iaddress(n));
874 }
875
876 void TemplateTable::lload(int n)
877 {
878 transition(vtos, ltos);
879 __ ldr(r0, laddress(n));
880 }
881
882 void TemplateTable::fload(int n)
883 {
884 transition(vtos, ftos);
885 __ ldrs(v0, faddress(n));
886 }
887
888 void TemplateTable::dload(int n)
889 {
890 transition(vtos, dtos);
891 __ ldrd(v0, daddress(n));
892 }
893
894 void TemplateTable::aload(int n)
895 {
896 transition(vtos, atos);
897 __ ldr(r0, iaddress(n));
898 }
899
900 void TemplateTable::aload_0() {
901 aload_0_internal();
902 }
903
904 void TemplateTable::nofast_aload_0() {
905 aload_0_internal(may_not_rewrite);
906 }
907
908 void TemplateTable::aload_0_internal(RewriteControl rc) {
909 // According to bytecode histograms, the pairs:
910 //
911 // _aload_0, _fast_igetfield
912 // _aload_0, _fast_agetfield
913 // _aload_0, _fast_fgetfield
914 //
915 // occur frequently. If RewriteFrequentPairs is set, the (slow)
916 // _aload_0 bytecode checks if the next bytecode is either
917 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
918 // rewrites the current bytecode into a pair bytecode; otherwise it
919 // rewrites the current bytecode into _fast_aload_0 that doesn't do
920 // the pair check anymore.
921 //
922 // Note: If the next bytecode is _getfield, the rewrite must be
923 // delayed, otherwise we may miss an opportunity for a pair.
924 //
925 // Also rewrite frequent pairs
926 // aload_0, aload_1
927 // aload_0, iload_1
928 // These bytecodes with a small amount of code are most profitable
929 // to rewrite
930 if (RewriteFrequentPairs && rc == may_rewrite) {
931 Label rewrite, done;
932 const Register bc = r4;
933
934 // get next bytecode
935 __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
936
937 // if _getfield then wait with rewrite
938 __ cmpw(r1, Bytecodes::Bytecodes::_getfield);
939 __ br(Assembler::EQ, done);
940
941 // if _igetfield then rewrite to _fast_iaccess_0
942 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
943 __ cmpw(r1, Bytecodes::_fast_igetfield);
944 __ movw(bc, Bytecodes::_fast_iaccess_0);
945 __ br(Assembler::EQ, rewrite);
946
947 // if _agetfield then rewrite to _fast_aaccess_0
948 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
949 __ cmpw(r1, Bytecodes::_fast_agetfield);
950 __ movw(bc, Bytecodes::_fast_aaccess_0);
951 __ br(Assembler::EQ, rewrite);
952
953 // if _fgetfield then rewrite to _fast_faccess_0
954 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
955 __ cmpw(r1, Bytecodes::_fast_fgetfield);
956 __ movw(bc, Bytecodes::_fast_faccess_0);
957 __ br(Assembler::EQ, rewrite);
958
959 // else rewrite to _fast_aload0
960 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
961 __ movw(bc, Bytecodes::Bytecodes::_fast_aload_0);
962
963 // rewrite
964 // bc: new bytecode
965 __ bind(rewrite);
966 patch_bytecode(Bytecodes::_aload_0, bc, r1, false);
967
968 __ bind(done);
969 }
970
971 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
972 aload(0);
973 }
974
975 void TemplateTable::istore()
976 {
977 transition(itos, vtos);
978 locals_index(r1);
979 // FIXME: We're being very pernickerty here storing a jint in a
980 // local with strw, which costs an extra instruction over what we'd
981 // be able to do with a simple str. We should just store the whole
982 // word.
983 __ lea(rscratch1, iaddress(r1));
984 __ strw(r0, Address(rscratch1));
985 }
986
987 void TemplateTable::lstore()
988 {
989 transition(ltos, vtos);
990 locals_index(r1);
991 __ str(r0, laddress(r1, rscratch1, _masm));
992 }
993
994 void TemplateTable::fstore() {
995 transition(ftos, vtos);
996 locals_index(r1);
997 __ lea(rscratch1, iaddress(r1));
998 __ strs(v0, Address(rscratch1));
999 }
1000
1001 void TemplateTable::dstore() {
1002 transition(dtos, vtos);
1003 locals_index(r1);
1004 __ strd(v0, daddress(r1, rscratch1, _masm));
1005 }
1006
1007 void TemplateTable::astore()
1008 {
1009 transition(vtos, vtos);
1010 __ pop_ptr(r0);
1011 locals_index(r1);
1012 __ str(r0, aaddress(r1));
1013 }
1014
1015 void TemplateTable::wide_istore() {
1016 transition(vtos, vtos);
1017 __ pop_i();
1018 locals_index_wide(r1);
1019 __ lea(rscratch1, iaddress(r1));
1020 __ strw(r0, Address(rscratch1));
1021 }
1022
1023 void TemplateTable::wide_lstore() {
1024 transition(vtos, vtos);
1025 __ pop_l();
1026 locals_index_wide(r1);
1027 __ str(r0, laddress(r1, rscratch1, _masm));
1028 }
1029
1030 void TemplateTable::wide_fstore() {
1031 transition(vtos, vtos);
1032 __ pop_f();
1033 locals_index_wide(r1);
1034 __ lea(rscratch1, faddress(r1));
1035 __ strs(v0, rscratch1);
1036 }
1037
1038 void TemplateTable::wide_dstore() {
1039 transition(vtos, vtos);
1040 __ pop_d();
1041 locals_index_wide(r1);
1042 __ strd(v0, daddress(r1, rscratch1, _masm));
1043 }
1044
1045 void TemplateTable::wide_astore() {
1046 transition(vtos, vtos);
1047 __ pop_ptr(r0);
1048 locals_index_wide(r1);
1049 __ str(r0, aaddress(r1));
1050 }
1051
1052 void TemplateTable::iastore() {
1053 transition(itos, vtos);
1054 __ pop_i(r1);
1055 __ pop_ptr(r3);
1056 // r0: value
1057 // r1: index
1058 // r3: array
1059 index_check(r3, r1); // prefer index in r1
1060 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
1061 __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
1062 }
1063
1064 void TemplateTable::lastore() {
1065 transition(ltos, vtos);
1066 __ pop_i(r1);
1067 __ pop_ptr(r3);
1068 // r0: value
1069 // r1: index
1070 // r3: array
1071 index_check(r3, r1); // prefer index in r1
1072 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
1073 __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
1074 }
1075
1076 void TemplateTable::fastore() {
1077 transition(ftos, vtos);
1078 __ pop_i(r1);
1079 __ pop_ptr(r3);
1080 // v0: value
1081 // r1: index
1082 // r3: array
1083 index_check(r3, r1); // prefer index in r1
1084 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
1085 __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
1086 }
1087
1088 void TemplateTable::dastore() {
1089 transition(dtos, vtos);
1090 __ pop_i(r1);
1091 __ pop_ptr(r3);
1092 // v0: value
1093 // r1: index
1094 // r3: array
1095 index_check(r3, r1); // prefer index in r1
1096 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
1097 __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
1098 }
1099
1100 void TemplateTable::aastore() {
1101 Label is_null, ok_is_subtype, done;
1102 transition(vtos, vtos);
1103 // stack: ..., array, index, value
1104 __ ldr(r0, at_tos()); // value
1105 __ ldr(r2, at_tos_p1()); // index
1106 __ ldr(r3, at_tos_p2()); // array
1107
1108 Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
1109
1110 index_check(r3, r2); // kills r1
1111 __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
1112
1113 // do array store check - check for NULL value first
1114 __ cbz(r0, is_null);
1115
1116 // Move subklass into r1
1117 __ load_klass(r1, r0);
1118 // Move superklass into r0
1119 __ load_klass(r0, r3);
1120 __ ldr(r0, Address(r0,
1121 ObjArrayKlass::element_klass_offset()));
1122 // Compress array + index*oopSize + 12 into a single register. Frees r2.
1123
1124 // Generate subtype check. Blows r2, r5
1125 // Superklass in r0. Subklass in r1.
1126 __ gen_subtype_check(r1, ok_is_subtype);
1127
1128 // Come here on failure
1129 // object is at TOS
1130 __ b(Interpreter::_throw_ArrayStoreException_entry);
1131
1132 // Come here on success
1133 __ bind(ok_is_subtype);
1134
1135 // Get the value we will store
1136 __ ldr(r0, at_tos());
1137 // Now store using the appropriate barrier
1138 do_oop_store(_masm, element_address, r0, IS_ARRAY);
1139 __ b(done);
1140
1141 // Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx]
1142 __ bind(is_null);
1143 __ profile_null_seen(r2);
1144
1145 // Store a NULL
1146 do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1147
1148 // Pop stack arguments
1149 __ bind(done);
1150 __ add(esp, esp, 3 * Interpreter::stackElementSize);
1151 }
1152
1153 void TemplateTable::bastore()
1154 {
1155 transition(itos, vtos);
1156 __ pop_i(r1);
1157 __ pop_ptr(r3);
1158 // r0: value
1159 // r1: index
1160 // r3: array
1161 index_check(r3, r1); // prefer index in r1
1162
1163 // Need to check whether array is boolean or byte
1164 // since both types share the bastore bytecode.
1165 __ load_klass(r2, r3);
1166 __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
1167 int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
1168 Label L_skip;
1169 __ tbz(r2, diffbit_index, L_skip);
1170 __ andw(r0, r0, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1171 __ bind(L_skip);
1172
1173 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
1174 __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
1175 }
1176
1177 void TemplateTable::castore()
1178 {
1179 transition(itos, vtos);
1180 __ pop_i(r1);
1181 __ pop_ptr(r3);
1182 // r0: value
1183 // r1: index
1184 // r3: array
1185 index_check(r3, r1); // prefer index in r1
1186 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
1187 __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
1188 }
1189
1190 void TemplateTable::sastore()
1191 {
1192 castore();
1193 }
1194
1195 void TemplateTable::istore(int n)
1196 {
1197 transition(itos, vtos);
1198 __ str(r0, iaddress(n));
1199 }
1200
1201 void TemplateTable::lstore(int n)
1202 {
1203 transition(ltos, vtos);
1204 __ str(r0, laddress(n));
1205 }
1206
1207 void TemplateTable::fstore(int n)
1208 {
1209 transition(ftos, vtos);
1210 __ strs(v0, faddress(n));
1211 }
1212
1213 void TemplateTable::dstore(int n)
1214 {
1215 transition(dtos, vtos);
1216 __ strd(v0, daddress(n));
1217 }
1218
1219 void TemplateTable::astore(int n)
1220 {
1221 transition(vtos, vtos);
1222 __ pop_ptr(r0);
1223 __ str(r0, iaddress(n));
1224 }
1225
1226 void TemplateTable::pop()
1227 {
1228 transition(vtos, vtos);
1229 __ add(esp, esp, Interpreter::stackElementSize);
1230 }
1231
1232 void TemplateTable::pop2()
1233 {
1234 transition(vtos, vtos);
1235 __ add(esp, esp, 2 * Interpreter::stackElementSize);
1236 }
1237
1238 void TemplateTable::dup()
1239 {
1240 transition(vtos, vtos);
1241 __ ldr(r0, Address(esp, 0));
1242 __ push(r0);
1243 // stack: ..., a, a
1244 }
1245
1246 void TemplateTable::dup_x1()
1247 {
1248 transition(vtos, vtos);
1249 // stack: ..., a, b
1250 __ ldr(r0, at_tos()); // load b
1251 __ ldr(r2, at_tos_p1()); // load a
1252 __ str(r0, at_tos_p1()); // store b
1253 __ str(r2, at_tos()); // store a
1254 __ push(r0); // push b
1255 // stack: ..., b, a, b
1256 }
1257
1258 void TemplateTable::dup_x2()
1259 {
1260 transition(vtos, vtos);
1261 // stack: ..., a, b, c
1262 __ ldr(r0, at_tos()); // load c
1263 __ ldr(r2, at_tos_p2()); // load a
1264 __ str(r0, at_tos_p2()); // store c in a
1265 __ push(r0); // push c
1266 // stack: ..., c, b, c, c
1267 __ ldr(r0, at_tos_p2()); // load b
1268 __ str(r2, at_tos_p2()); // store a in b
1269 // stack: ..., c, a, c, c
1270 __ str(r0, at_tos_p1()); // store b in c
1271 // stack: ..., c, a, b, c
1272 }
1273
1274 void TemplateTable::dup2()
1275 {
1276 transition(vtos, vtos);
1277 // stack: ..., a, b
1278 __ ldr(r0, at_tos_p1()); // load a
1279 __ push(r0); // push a
1280 __ ldr(r0, at_tos_p1()); // load b
1281 __ push(r0); // push b
1282 // stack: ..., a, b, a, b
1283 }
1284
1285 void TemplateTable::dup2_x1()
1286 {
1287 transition(vtos, vtos);
1288 // stack: ..., a, b, c
1289 __ ldr(r2, at_tos()); // load c
1290 __ ldr(r0, at_tos_p1()); // load b
1291 __ push(r0); // push b
1292 __ push(r2); // push c
1293 // stack: ..., a, b, c, b, c
1294 __ str(r2, at_tos_p3()); // store c in b
1295 // stack: ..., a, c, c, b, c
1296 __ ldr(r2, at_tos_p4()); // load a
1297 __ str(r2, at_tos_p2()); // store a in 2nd c
1298 // stack: ..., a, c, a, b, c
1299 __ str(r0, at_tos_p4()); // store b in a
1300 // stack: ..., b, c, a, b, c
1301 }
1302
1303 void TemplateTable::dup2_x2()
1304 {
1305 transition(vtos, vtos);
1306 // stack: ..., a, b, c, d
1307 __ ldr(r2, at_tos()); // load d
1308 __ ldr(r0, at_tos_p1()); // load c
1309 __ push(r0) ; // push c
1310 __ push(r2); // push d
1311 // stack: ..., a, b, c, d, c, d
1312 __ ldr(r0, at_tos_p4()); // load b
1313 __ str(r0, at_tos_p2()); // store b in d
1314 __ str(r2, at_tos_p4()); // store d in b
1315 // stack: ..., a, d, c, b, c, d
1316 __ ldr(r2, at_tos_p5()); // load a
1317 __ ldr(r0, at_tos_p3()); // load c
1318 __ str(r2, at_tos_p3()); // store a in c
1319 __ str(r0, at_tos_p5()); // store c in a
1320 // stack: ..., c, d, a, b, c, d
1321 }
1322
1323 void TemplateTable::swap()
1324 {
1325 transition(vtos, vtos);
1326 // stack: ..., a, b
1327 __ ldr(r2, at_tos_p1()); // load a
1328 __ ldr(r0, at_tos()); // load b
1329 __ str(r2, at_tos()); // store a in b
1330 __ str(r0, at_tos_p1()); // store b in a
1331 // stack: ..., b, a
1332 }
1333
1334 void TemplateTable::iop2(Operation op)
1335 {
1336 transition(itos, itos);
1337 // r0 <== r1 op r0
1338 __ pop_i(r1);
1339 switch (op) {
1340 case add : __ addw(r0, r1, r0); break;
1341 case sub : __ subw(r0, r1, r0); break;
1342 case mul : __ mulw(r0, r1, r0); break;
1343 case _and : __ andw(r0, r1, r0); break;
1344 case _or : __ orrw(r0, r1, r0); break;
1345 case _xor : __ eorw(r0, r1, r0); break;
1346 case shl : __ lslvw(r0, r1, r0); break;
1347 case shr : __ asrvw(r0, r1, r0); break;
1348 case ushr : __ lsrvw(r0, r1, r0);break;
1349 default : ShouldNotReachHere();
1350 }
1351 }
1352
1353 void TemplateTable::lop2(Operation op)
1354 {
1355 transition(ltos, ltos);
1356 // r0 <== r1 op r0
1357 __ pop_l(r1);
1358 switch (op) {
1359 case add : __ add(r0, r1, r0); break;
1360 case sub : __ sub(r0, r1, r0); break;
1361 case mul : __ mul(r0, r1, r0); break;
1362 case _and : __ andr(r0, r1, r0); break;
1363 case _or : __ orr(r0, r1, r0); break;
1364 case _xor : __ eor(r0, r1, r0); break;
1365 default : ShouldNotReachHere();
1366 }
1367 }
1368
1369 void TemplateTable::idiv()
1370 {
1371 transition(itos, itos);
1372 // explicitly check for div0
1373 Label no_div0;
1374 __ cbnzw(r0, no_div0);
1375 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1376 __ br(rscratch1);
1377 __ bind(no_div0);
1378 __ pop_i(r1);
1379 // r0 <== r1 idiv r0
1380 __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
1381 }
1382
1383 void TemplateTable::irem()
1384 {
1385 transition(itos, itos);
1386 // explicitly check for div0
1387 Label no_div0;
1388 __ cbnzw(r0, no_div0);
1389 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1390 __ br(rscratch1);
1391 __ bind(no_div0);
1392 __ pop_i(r1);
1393 // r0 <== r1 irem r0
1394 __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
1395 }
1396
1397 void TemplateTable::lmul()
1398 {
1399 transition(ltos, ltos);
1400 __ pop_l(r1);
1401 __ mul(r0, r0, r1);
1402 }
1403
1404 void TemplateTable::ldiv()
1405 {
1406 transition(ltos, ltos);
1407 // explicitly check for div0
1408 Label no_div0;
1409 __ cbnz(r0, no_div0);
1410 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1411 __ br(rscratch1);
1412 __ bind(no_div0);
1413 __ pop_l(r1);
1414 // r0 <== r1 ldiv r0
1415 __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
1416 }
1417
1418 void TemplateTable::lrem()
1419 {
1420 transition(ltos, ltos);
1421 // explicitly check for div0
1422 Label no_div0;
1423 __ cbnz(r0, no_div0);
1424 __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1425 __ br(rscratch1);
1426 __ bind(no_div0);
1427 __ pop_l(r1);
1428 // r0 <== r1 lrem r0
1429 __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
1430 }
1431
1432 void TemplateTable::lshl()
1433 {
1434 transition(itos, ltos);
1435 // shift count is in r0
1436 __ pop_l(r1);
1437 __ lslv(r0, r1, r0);
1438 }
1439
1440 void TemplateTable::lshr()
1441 {
1442 transition(itos, ltos);
1443 // shift count is in r0
1444 __ pop_l(r1);
1445 __ asrv(r0, r1, r0);
1446 }
1447
1448 void TemplateTable::lushr()
1449 {
1450 transition(itos, ltos);
1451 // shift count is in r0
1452 __ pop_l(r1);
1453 __ lsrv(r0, r1, r0);
1454 }
1455
1456 void TemplateTable::fop2(Operation op)
1457 {
1458 transition(ftos, ftos);
1459 switch (op) {
1460 case add:
1461 // n.b. use ldrd because this is a 64 bit slot
1462 __ pop_f(v1);
1463 __ fadds(v0, v1, v0);
1464 break;
1465 case sub:
1466 __ pop_f(v1);
1467 __ fsubs(v0, v1, v0);
1468 break;
1469 case mul:
1470 __ pop_f(v1);
1471 __ fmuls(v0, v1, v0);
1472 break;
1473 case div:
1474 __ pop_f(v1);
1475 __ fdivs(v0, v1, v0);
1476 break;
1477 case rem:
1478 __ fmovs(v1, v0);
1479 __ pop_f(v0);
1480 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1481 break;
1482 default:
1483 ShouldNotReachHere();
1484 break;
1485 }
1486 }
1487
1488 void TemplateTable::dop2(Operation op)
1489 {
1490 transition(dtos, dtos);
1491 switch (op) {
1492 case add:
1493 // n.b. use ldrd because this is a 64 bit slot
1494 __ pop_d(v1);
1495 __ faddd(v0, v1, v0);
1496 break;
1497 case sub:
1498 __ pop_d(v1);
1499 __ fsubd(v0, v1, v0);
1500 break;
1501 case mul:
1502 __ pop_d(v1);
1503 __ fmuld(v0, v1, v0);
1504 break;
1505 case div:
1506 __ pop_d(v1);
1507 __ fdivd(v0, v1, v0);
1508 break;
1509 case rem:
1510 __ fmovd(v1, v0);
1511 __ pop_d(v0);
1512 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1513 break;
1514 default:
1515 ShouldNotReachHere();
1516 break;
1517 }
1518 }
1519
1520 void TemplateTable::ineg()
1521 {
1522 transition(itos, itos);
1523 __ negw(r0, r0);
1524
1525 }
1526
1527 void TemplateTable::lneg()
1528 {
1529 transition(ltos, ltos);
1530 __ neg(r0, r0);
1531 }
1532
1533 void TemplateTable::fneg()
1534 {
1535 transition(ftos, ftos);
1536 __ fnegs(v0, v0);
1537 }
1538
1539 void TemplateTable::dneg()
1540 {
1541 transition(dtos, dtos);
1542 __ fnegd(v0, v0);
1543 }
1544
1545 void TemplateTable::iinc()
1546 {
1547 transition(vtos, vtos);
1548 __ load_signed_byte(r1, at_bcp(2)); // get constant
1549 locals_index(r2);
1550 __ ldr(r0, iaddress(r2));
1551 __ addw(r0, r0, r1);
1552 __ str(r0, iaddress(r2));
1553 }
1554
1555 void TemplateTable::wide_iinc()
1556 {
1557 transition(vtos, vtos);
1558 // __ mov(r1, zr);
1559 __ ldrw(r1, at_bcp(2)); // get constant and index
1560 __ rev16(r1, r1);
1561 __ ubfx(r2, r1, 0, 16);
1562 __ neg(r2, r2);
1563 __ sbfx(r1, r1, 16, 16);
1564 __ ldr(r0, iaddress(r2));
1565 __ addw(r0, r0, r1);
1566 __ str(r0, iaddress(r2));
1567 }
1568
1569 void TemplateTable::convert()
1570 {
1571 // Checking
1572 #ifdef ASSERT
1573 {
1574 TosState tos_in = ilgl;
1575 TosState tos_out = ilgl;
1576 switch (bytecode()) {
1577 case Bytecodes::_i2l: // fall through
1578 case Bytecodes::_i2f: // fall through
1579 case Bytecodes::_i2d: // fall through
1580 case Bytecodes::_i2b: // fall through
1581 case Bytecodes::_i2c: // fall through
1582 case Bytecodes::_i2s: tos_in = itos; break;
1583 case Bytecodes::_l2i: // fall through
1584 case Bytecodes::_l2f: // fall through
1585 case Bytecodes::_l2d: tos_in = ltos; break;
1586 case Bytecodes::_f2i: // fall through
1587 case Bytecodes::_f2l: // fall through
1588 case Bytecodes::_f2d: tos_in = ftos; break;
1589 case Bytecodes::_d2i: // fall through
1590 case Bytecodes::_d2l: // fall through
1591 case Bytecodes::_d2f: tos_in = dtos; break;
1592 default : ShouldNotReachHere();
1593 }
1594 switch (bytecode()) {
1595 case Bytecodes::_l2i: // fall through
1596 case Bytecodes::_f2i: // fall through
1597 case Bytecodes::_d2i: // fall through
1598 case Bytecodes::_i2b: // fall through
1599 case Bytecodes::_i2c: // fall through
1600 case Bytecodes::_i2s: tos_out = itos; break;
1601 case Bytecodes::_i2l: // fall through
1602 case Bytecodes::_f2l: // fall through
1603 case Bytecodes::_d2l: tos_out = ltos; break;
1604 case Bytecodes::_i2f: // fall through
1605 case Bytecodes::_l2f: // fall through
1606 case Bytecodes::_d2f: tos_out = ftos; break;
1607 case Bytecodes::_i2d: // fall through
1608 case Bytecodes::_l2d: // fall through
1609 case Bytecodes::_f2d: tos_out = dtos; break;
1610 default : ShouldNotReachHere();
1611 }
1612 transition(tos_in, tos_out);
1613 }
1614 #endif // ASSERT
1615 // static const int64_t is_nan = 0x8000000000000000L;
1616
1617 // Conversion
1618 switch (bytecode()) {
1619 case Bytecodes::_i2l:
1620 __ sxtw(r0, r0);
1621 break;
1622 case Bytecodes::_i2f:
1623 __ scvtfws(v0, r0);
1624 break;
1625 case Bytecodes::_i2d:
1626 __ scvtfwd(v0, r0);
1627 break;
1628 case Bytecodes::_i2b:
1629 __ sxtbw(r0, r0);
1630 break;
1631 case Bytecodes::_i2c:
1632 __ uxthw(r0, r0);
1633 break;
1634 case Bytecodes::_i2s:
1635 __ sxthw(r0, r0);
1636 break;
1637 case Bytecodes::_l2i:
1638 __ uxtw(r0, r0);
1639 break;
1640 case Bytecodes::_l2f:
1641 __ scvtfs(v0, r0);
1642 break;
1643 case Bytecodes::_l2d:
1644 __ scvtfd(v0, r0);
1645 break;
1646 case Bytecodes::_f2i:
1647 {
1648 Label L_Okay;
1649 __ clear_fpsr();
1650 __ fcvtzsw(r0, v0);
1651 __ get_fpsr(r1);
1652 __ cbzw(r1, L_Okay);
1653 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
1654 __ bind(L_Okay);
1655 }
1656 break;
1657 case Bytecodes::_f2l:
1658 {
1659 Label L_Okay;
1660 __ clear_fpsr();
1661 __ fcvtzs(r0, v0);
1662 __ get_fpsr(r1);
1663 __ cbzw(r1, L_Okay);
1664 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1665 __ bind(L_Okay);
1666 }
1667 break;
1668 case Bytecodes::_f2d:
1669 __ fcvts(v0, v0);
1670 break;
1671 case Bytecodes::_d2i:
1672 {
1673 Label L_Okay;
1674 __ clear_fpsr();
1675 __ fcvtzdw(r0, v0);
1676 __ get_fpsr(r1);
1677 __ cbzw(r1, L_Okay);
1678 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
1679 __ bind(L_Okay);
1680 }
1681 break;
1682 case Bytecodes::_d2l:
1683 {
1684 Label L_Okay;
1685 __ clear_fpsr();
1686 __ fcvtzd(r0, v0);
1687 __ get_fpsr(r1);
1688 __ cbzw(r1, L_Okay);
1689 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1690 __ bind(L_Okay);
1691 }
1692 break;
1693 case Bytecodes::_d2f:
1694 __ fcvtd(v0, v0);
1695 break;
1696 default:
1697 ShouldNotReachHere();
1698 }
1699 }
1700
1701 void TemplateTable::lcmp()
1702 {
1703 transition(ltos, itos);
1704 Label done;
1705 __ pop_l(r1);
1706 __ cmp(r1, r0);
1707 __ mov(r0, (uint64_t)-1L);
1708 __ br(Assembler::LT, done);
1709 // __ mov(r0, 1UL);
1710 // __ csel(r0, r0, zr, Assembler::NE);
1711 // and here is a faster way
1712 __ csinc(r0, zr, zr, Assembler::EQ);
1713 __ bind(done);
1714 }
1715
1716 void TemplateTable::float_cmp(bool is_float, int unordered_result)
1717 {
1718 Label done;
1719 if (is_float) {
1720 // XXX get rid of pop here, use ... reg, mem32
1721 __ pop_f(v1);
1722 __ fcmps(v1, v0);
1723 } else {
1724 // XXX get rid of pop here, use ... reg, mem64
1725 __ pop_d(v1);
1726 __ fcmpd(v1, v0);
1727 }
1728 if (unordered_result < 0) {
1729 // we want -1 for unordered or less than, 0 for equal and 1 for
1730 // greater than.
1731 __ mov(r0, (uint64_t)-1L);
1732 // for FP LT tests less than or unordered
1733 __ br(Assembler::LT, done);
1734 // install 0 for EQ otherwise 1
1735 __ csinc(r0, zr, zr, Assembler::EQ);
1736 } else {
1737 // we want -1 for less than, 0 for equal and 1 for unordered or
1738 // greater than.
1739 __ mov(r0, 1L);
1740 // for FP HI tests greater than or unordered
1741 __ br(Assembler::HI, done);
1742 // install 0 for EQ otherwise ~0
1743 __ csinv(r0, zr, zr, Assembler::EQ);
1744
1745 }
1746 __ bind(done);
1747 }
1748
1749 void TemplateTable::branch(bool is_jsr, bool is_wide)
1750 {
1751 __ profile_taken_branch(r0, r1);
1752 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1753 InvocationCounter::counter_offset();
1754 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1755 InvocationCounter::counter_offset();
1756
1757 // load branch displacement
1758 if (!is_wide) {
1759 __ ldrh(r2, at_bcp(1));
1760 __ rev16(r2, r2);
1761 // sign extend the 16 bit value in r2
1762 __ sbfm(r2, r2, 0, 15);
1763 } else {
1764 __ ldrw(r2, at_bcp(1));
1765 __ revw(r2, r2);
1766 // sign extend the 32 bit value in r2
1767 __ sbfm(r2, r2, 0, 31);
1768 }
1769
1770 // Handle all the JSR stuff here, then exit.
1771 // It's much shorter and cleaner than intermingling with the non-JSR
1772 // normal-branch stuff occurring below.
1773
1774 if (is_jsr) {
1775 // Pre-load the next target bytecode into rscratch1
1776 __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1777 // compute return address as bci
1778 __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1779 __ add(rscratch2, rscratch2,
1780 in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1781 __ sub(r1, rbcp, rscratch2);
1782 __ push_i(r1);
1783 // Adjust the bcp by the 16-bit displacement in r2
1784 __ add(rbcp, rbcp, r2);
1785 __ dispatch_only(vtos, /*generate_poll*/true);
1786 return;
1787 }
1788
1789 // Normal (non-jsr) branch handling
1790
1791 // Adjust the bcp by the displacement in r2
1792 __ add(rbcp, rbcp, r2);
1793
1794 assert(UseLoopCounter || !UseOnStackReplacement,
1795 "on-stack-replacement requires loop counters");
1796 Label backedge_counter_overflow;
1797 Label dispatch;
1798 if (UseLoopCounter) {
1799 // increment backedge counter for backward branches
1800 // r0: MDO
1801 // w1: MDO bumped taken-count
1802 // r2: target offset
1803 __ cmp(r2, zr);
1804 __ br(Assembler::GT, dispatch); // count only if backward branch
1805
1806 // ECN: FIXME: This code smells
1807 // check if MethodCounters exists
1808 Label has_counters;
1809 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1810 __ cbnz(rscratch1, has_counters);
1811 __ push(r0);
1812 __ push(r1);
1813 __ push(r2);
1814 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1815 InterpreterRuntime::build_method_counters), rmethod);
1816 __ pop(r2);
1817 __ pop(r1);
1818 __ pop(r0);
1819 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1820 __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1821 __ bind(has_counters);
1822
1823 Label no_mdo;
1824 int increment = InvocationCounter::count_increment;
1825 if (ProfileInterpreter) {
1826 // Are we profiling?
1827 __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1828 __ cbz(r1, no_mdo);
1829 // Increment the MDO backedge counter
1830 const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1831 in_bytes(InvocationCounter::counter_offset()));
1832 const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1833 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1834 r0, rscratch1, false, Assembler::EQ,
1835 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1836 __ b(dispatch);
1837 }
1838 __ bind(no_mdo);
1839 // Increment backedge counter in MethodCounters*
1840 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1841 const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1842 __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1843 r0, rscratch2, false, Assembler::EQ,
1844 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1845 __ bind(dispatch);
1846 }
1847
1848 // Pre-load the next target bytecode into rscratch1
1849 __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1850
1851 // continue with the bytecode @ target
1852 // rscratch1: target bytecode
1853 // rbcp: target bcp
1854 __ dispatch_only(vtos, /*generate_poll*/true);
1855
1856 if (UseLoopCounter && UseOnStackReplacement) {
1857 // invocation counter overflow
1858 __ bind(backedge_counter_overflow);
1859 __ neg(r2, r2);
1860 __ add(r2, r2, rbcp); // branch bcp
1861 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1862 __ call_VM(noreg,
1863 CAST_FROM_FN_PTR(address,
1864 InterpreterRuntime::frequency_counter_overflow),
1865 r2);
1866 __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
1867
1868 // r0: osr nmethod (osr ok) or NULL (osr not possible)
1869 // w1: target bytecode
1870 // r2: scratch
1871 __ cbz(r0, dispatch); // test result -- no osr if null
1872 // nmethod may have been invalidated (VM may block upon call_VM return)
1873 __ ldrb(r2, Address(r0, nmethod::state_offset()));
1874 if (nmethod::in_use != 0)
1875 __ sub(r2, r2, nmethod::in_use);
1876 __ cbnz(r2, dispatch);
1877
1878 // We have the address of an on stack replacement routine in r0
1879 // We need to prepare to execute the OSR method. First we must
1880 // migrate the locals and monitors off of the stack.
1881
1882 __ mov(r19, r0); // save the nmethod
1883
1884 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1885
1886 // r0 is OSR buffer, move it to expected parameter location
1887 __ mov(j_rarg0, r0);
1888
1889 // remove activation
1890 // get sender esp
1891 __ ldr(esp,
1892 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1893 // remove frame anchor
1894 __ leave();
1895 // Ensure compiled code always sees stack at proper alignment
1896 __ andr(sp, esp, -16);
1897
1898 // and begin the OSR nmethod
1899 __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1900 __ br(rscratch1);
1901 }
1902 }
1903
1904
1905 void TemplateTable::if_0cmp(Condition cc)
1906 {
1907 transition(itos, vtos);
1908 // assume branch is more often taken than not (loops use backward branches)
1909 Label not_taken;
1910 if (cc == equal)
1911 __ cbnzw(r0, not_taken);
1912 else if (cc == not_equal)
1913 __ cbzw(r0, not_taken);
1914 else {
1915 __ andsw(zr, r0, r0);
1916 __ br(j_not(cc), not_taken);
1917 }
1918
1919 branch(false, false);
1920 __ bind(not_taken);
1921 __ profile_not_taken_branch(r0);
1922 }
1923
1924 void TemplateTable::if_icmp(Condition cc)
1925 {
1926 transition(itos, vtos);
1927 // assume branch is more often taken than not (loops use backward branches)
1928 Label not_taken;
1929 __ pop_i(r1);
1930 __ cmpw(r1, r0, Assembler::LSL);
1931 __ br(j_not(cc), not_taken);
1932 branch(false, false);
1933 __ bind(not_taken);
1934 __ profile_not_taken_branch(r0);
1935 }
1936
1937 void TemplateTable::if_nullcmp(Condition cc)
1938 {
1939 transition(atos, vtos);
1940 // assume branch is more often taken than not (loops use backward branches)
1941 Label not_taken;
1942 if (cc == equal)
1943 __ cbnz(r0, not_taken);
1944 else
1945 __ cbz(r0, not_taken);
1946 branch(false, false);
1947 __ bind(not_taken);
1948 __ profile_not_taken_branch(r0);
1949 }
1950
1951 void TemplateTable::if_acmp(Condition cc)
1952 {
1953 transition(atos, vtos);
1954 // assume branch is more often taken than not (loops use backward branches)
1955 Label not_taken;
1956 __ pop_ptr(r1);
1957 __ cmpoop(r1, r0);
1958 __ br(j_not(cc), not_taken);
1959 branch(false, false);
1960 __ bind(not_taken);
1961 __ profile_not_taken_branch(r0);
1962 }
1963
1964 void TemplateTable::ret() {
1965 transition(vtos, vtos);
1966 locals_index(r1);
1967 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1968 __ profile_ret(r1, r2);
1969 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1970 __ lea(rbcp, Address(rbcp, r1));
1971 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1972 __ dispatch_next(vtos, 0, /*generate_poll*/true);
1973 }
1974
1975 void TemplateTable::wide_ret() {
1976 transition(vtos, vtos);
1977 locals_index_wide(r1);
1978 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1979 __ profile_ret(r1, r2);
1980 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1981 __ lea(rbcp, Address(rbcp, r1));
1982 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1983 __ dispatch_next(vtos, 0, /*generate_poll*/true);
1984 }
1985
1986
1987 void TemplateTable::tableswitch() {
1988 Label default_case, continue_execution;
1989 transition(itos, vtos);
1990 // align rbcp
1991 __ lea(r1, at_bcp(BytesPerInt));
1992 __ andr(r1, r1, -BytesPerInt);
1993 // load lo & hi
1994 __ ldrw(r2, Address(r1, BytesPerInt));
1995 __ ldrw(r3, Address(r1, 2 * BytesPerInt));
1996 __ rev32(r2, r2);
1997 __ rev32(r3, r3);
1998 // check against lo & hi
1999 __ cmpw(r0, r2);
2000 __ br(Assembler::LT, default_case);
2001 __ cmpw(r0, r3);
2002 __ br(Assembler::GT, default_case);
2003 // lookup dispatch offset
2004 __ subw(r0, r0, r2);
2005 __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2006 __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2007 __ profile_switch_case(r0, r1, r2);
2008 // continue execution
2009 __ bind(continue_execution);
2010 __ rev32(r3, r3);
2011 __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2012 __ add(rbcp, rbcp, r3, ext::sxtw);
2013 __ dispatch_only(vtos, /*generate_poll*/true);
2014 // handle default
2015 __ bind(default_case);
2016 __ profile_switch_default(r0);
2017 __ ldrw(r3, Address(r1, 0));
2018 __ b(continue_execution);
2019 }
2020
2021 void TemplateTable::lookupswitch() {
2022 transition(itos, itos);
2023 __ stop("lookupswitch bytecode should have been rewritten");
2024 }
2025
2026 void TemplateTable::fast_linearswitch() {
2027 transition(itos, vtos);
2028 Label loop_entry, loop, found, continue_execution;
2029 // bswap r0 so we can avoid bswapping the table entries
2030 __ rev32(r0, r0);
2031 // align rbcp
2032 __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2033 // this instruction (change offsets
2034 // below)
2035 __ andr(r19, r19, -BytesPerInt);
2036 // set counter
2037 __ ldrw(r1, Address(r19, BytesPerInt));
2038 __ rev32(r1, r1);
2039 __ b(loop_entry);
2040 // table search
2041 __ bind(loop);
2042 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2043 __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2044 __ cmpw(r0, rscratch1);
2045 __ br(Assembler::EQ, found);
2046 __ bind(loop_entry);
2047 __ subs(r1, r1, 1);
2048 __ br(Assembler::PL, loop);
2049 // default case
2050 __ profile_switch_default(r0);
2051 __ ldrw(r3, Address(r19, 0));
2052 __ b(continue_execution);
2053 // entry found -> get offset
2054 __ bind(found);
2055 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2056 __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2057 __ profile_switch_case(r1, r0, r19);
2058 // continue execution
2059 __ bind(continue_execution);
2060 __ rev32(r3, r3);
2061 __ add(rbcp, rbcp, r3, ext::sxtw);
2062 __ ldrb(rscratch1, Address(rbcp, 0));
2063 __ dispatch_only(vtos, /*generate_poll*/true);
2064 }
2065
2066 void TemplateTable::fast_binaryswitch() {
2067 transition(itos, vtos);
2068 // Implementation using the following core algorithm:
2069 //
2070 // int binary_search(int key, LookupswitchPair* array, int n) {
2071 // // Binary search according to "Methodik des Programmierens" by
2072 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2073 // int i = 0;
2074 // int j = n;
2075 // while (i+1 < j) {
2076 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2077 // // with Q: for all i: 0 <= i < n: key < a[i]
2078 // // where a stands for the array and assuming that the (inexisting)
2079 // // element a[n] is infinitely big.
2080 // int h = (i + j) >> 1;
2081 // // i < h < j
2082 // if (key < array[h].fast_match()) {
2083 // j = h;
2084 // } else {
2085 // i = h;
2086 // }
2087 // }
2088 // // R: a[i] <= key < a[i+1] or Q
2089 // // (i.e., if key is within array, i is the correct index)
2090 // return i;
2091 // }
2092
2093 // Register allocation
2094 const Register key = r0; // already set (tosca)
2095 const Register array = r1;
2096 const Register i = r2;
2097 const Register j = r3;
2098 const Register h = rscratch1;
2099 const Register temp = rscratch2;
2100
2101 // Find array start
2102 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2103 // get rid of this
2104 // instruction (change
2105 // offsets below)
2106 __ andr(array, array, -BytesPerInt);
2107
2108 // Initialize i & j
2109 __ mov(i, 0); // i = 0;
2110 __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2111
2112 // Convert j into native byteordering
2113 __ rev32(j, j);
2114
2115 // And start
2116 Label entry;
2117 __ b(entry);
2118
2119 // binary search loop
2120 {
2121 Label loop;
2122 __ bind(loop);
2123 // int h = (i + j) >> 1;
2124 __ addw(h, i, j); // h = i + j;
2125 __ lsrw(h, h, 1); // h = (i + j) >> 1;
2126 // if (key < array[h].fast_match()) {
2127 // j = h;
2128 // } else {
2129 // i = h;
2130 // }
2131 // Convert array[h].match to native byte-ordering before compare
2132 __ ldr(temp, Address(array, h, Address::lsl(3)));
2133 __ rev32(temp, temp);
2134 __ cmpw(key, temp);
2135 // j = h if (key < array[h].fast_match())
2136 __ csel(j, h, j, Assembler::LT);
2137 // i = h if (key >= array[h].fast_match())
2138 __ csel(i, h, i, Assembler::GE);
2139 // while (i+1 < j)
2140 __ bind(entry);
2141 __ addw(h, i, 1); // i+1
2142 __ cmpw(h, j); // i+1 < j
2143 __ br(Assembler::LT, loop);
2144 }
2145
2146 // end of binary search, result index is i (must check again!)
2147 Label default_case;
2148 // Convert array[i].match to native byte-ordering before compare
2149 __ ldr(temp, Address(array, i, Address::lsl(3)));
2150 __ rev32(temp, temp);
2151 __ cmpw(key, temp);
2152 __ br(Assembler::NE, default_case);
2153
2154 // entry found -> j = offset
2155 __ add(j, array, i, ext::uxtx, 3);
2156 __ ldrw(j, Address(j, BytesPerInt));
2157 __ profile_switch_case(i, key, array);
2158 __ rev32(j, j);
2159 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2160 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2161 __ dispatch_only(vtos, /*generate_poll*/true);
2162
2163 // default case -> j = default offset
2164 __ bind(default_case);
2165 __ profile_switch_default(i);
2166 __ ldrw(j, Address(array, -2 * BytesPerInt));
2167 __ rev32(j, j);
2168 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2169 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2170 __ dispatch_only(vtos, /*generate_poll*/true);
2171 }
2172
2173
2174 void TemplateTable::_return(TosState state)
2175 {
2176 transition(state, state);
2177 assert(_desc->calls_vm(),
2178 "inconsistent calls_vm information"); // call in remove_activation
2179
2180 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2181 assert(state == vtos, "only valid state");
2182
2183 __ ldr(c_rarg1, aaddress(0));
2184 __ load_klass(r3, c_rarg1);
2185 __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2186 Label skip_register_finalizer;
2187 __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2188
2189 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2190
2191 __ bind(skip_register_finalizer);
2192 }
2193
2194 // Issue a StoreStore barrier after all stores but before return
2195 // from any constructor for any class with a final field. We don't
2196 // know if this is a finalizer, so we always do so.
2197 if (_desc->bytecode() == Bytecodes::_return)
2198 __ membar(MacroAssembler::StoreStore);
2199
2200 // Narrow result if state is itos but result type is smaller.
2201 // Need to narrow in the return bytecode rather than in generate_return_entry
2202 // since compiled code callers expect the result to already be narrowed.
2203 if (state == itos) {
2204 __ narrow(r0);
2205 }
2206
2207 __ remove_activation(state);
2208 __ ret(lr);
2209 }
2210
2211 // ----------------------------------------------------------------------------
2212 // Volatile variables demand their effects be made known to all CPU's
2213 // in order. Store buffers on most chips allow reads & writes to
2214 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2215 // without some kind of memory barrier (i.e., it's not sufficient that
2216 // the interpreter does not reorder volatile references, the hardware
2217 // also must not reorder them).
2218 //
2219 // According to the new Java Memory Model (JMM):
2220 // (1) All volatiles are serialized wrt to each other. ALSO reads &
2221 // writes act as aquire & release, so:
2222 // (2) A read cannot let unrelated NON-volatile memory refs that
2223 // happen after the read float up to before the read. It's OK for
2224 // non-volatile memory refs that happen before the volatile read to
2225 // float down below it.
2226 // (3) Similar a volatile write cannot let unrelated NON-volatile
2227 // memory refs that happen BEFORE the write float down to after the
2228 // write. It's OK for non-volatile memory refs that happen after the
2229 // volatile write to float up before it.
2230 //
2231 // We only put in barriers around volatile refs (they are expensive),
2232 // not _between_ memory refs (that would require us to track the
2233 // flavor of the previous memory refs). Requirements (2) and (3)
2234 // require some barriers before volatile stores and after volatile
2235 // loads. These nearly cover requirement (1) but miss the
2236 // volatile-store-volatile-load case. This final case is placed after
2237 // volatile-stores although it could just as well go before
2238 // volatile-loads.
2239
2240 void TemplateTable::resolve_cache_and_index(int byte_no,
2241 Register Rcache,
2242 Register index,
2243 size_t index_size) {
2244 const Register temp = r19;
2245 assert_different_registers(Rcache, index, temp);
2246
2247 Label resolved, clinit_barrier_slow;
2248
2249 Bytecodes::Code code = bytecode();
2250 switch (code) {
2251 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2252 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2253 default: break;
2254 }
2255
2256 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2257 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2258 __ subs(zr, temp, (int) code); // have we resolved this bytecode?
2259 __ br(Assembler::EQ, resolved);
2260
2261 // resolve first time through
2262 // Class initialization barrier slow path lands here as well.
2263 __ bind(clinit_barrier_slow);
2264 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2265 __ mov(temp, (int) code);
2266 __ call_VM(noreg, entry, temp);
2267
2268 // Update registers with resolved info
2269 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2270 // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2271 // so all clients ofthis method must be modified accordingly
2272 __ bind(resolved);
2273
2274 // Class initialization barrier for static methods
2275 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2276 __ load_resolved_method_at_index(byte_no, temp, Rcache);
2277 __ load_method_holder(temp, temp);
2278 __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
2279 }
2280 }
2281
2282 // The Rcache and index registers must be set before call
2283 // n.b unlike x86 cache already includes the index offset
2284 void TemplateTable::load_field_cp_cache_entry(Register obj,
2285 Register cache,
2286 Register index,
2287 Register off,
2288 Register flags,
2289 bool is_static = false) {
2290 assert_different_registers(cache, index, flags, off);
2291
2292 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2293 // Field offset
2294 __ ldr(off, Address(cache, in_bytes(cp_base_offset +
2295 ConstantPoolCacheEntry::f2_offset())));
2296 // Flags
2297 __ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
2298 ConstantPoolCacheEntry::flags_offset())));
2299
2300 // klass overwrite register
2301 if (is_static) {
2302 __ ldr(obj, Address(cache, in_bytes(cp_base_offset +
2303 ConstantPoolCacheEntry::f1_offset())));
2304 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2305 __ ldr(obj, Address(obj, mirror_offset));
2306 __ resolve_oop_handle(obj);
2307 }
2308 }
2309
2310 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2311 Register method,
2312 Register itable_index,
2313 Register flags,
2314 bool is_invokevirtual,
2315 bool is_invokevfinal, /*unused*/
2316 bool is_invokedynamic) {
2317 // setup registers
2318 const Register cache = rscratch2;
2319 const Register index = r4;
2320 assert_different_registers(method, flags);
2321 assert_different_registers(method, cache, index);
2322 assert_different_registers(itable_index, flags);
2323 assert_different_registers(itable_index, cache, index);
2324 // determine constant pool cache field offsets
2325 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2326 const int method_offset = in_bytes(
2327 ConstantPoolCache::base_offset() +
2328 (is_invokevirtual
2329 ? ConstantPoolCacheEntry::f2_offset()
2330 : ConstantPoolCacheEntry::f1_offset()));
2331 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2332 ConstantPoolCacheEntry::flags_offset());
2333 // access constant pool cache fields
2334 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2335 ConstantPoolCacheEntry::f2_offset());
2336
2337 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2338 resolve_cache_and_index(byte_no, cache, index, index_size);
2339 __ ldr(method, Address(cache, method_offset));
2340
2341 if (itable_index != noreg) {
2342 __ ldr(itable_index, Address(cache, index_offset));
2343 }
2344 __ ldrw(flags, Address(cache, flags_offset));
2345 }
2346
2347
2348 // The registers cache and index expected to be set before call.
2349 // Correct values of the cache and index registers are preserved.
2350 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2351 bool is_static, bool has_tos) {
2352 // do the JVMTI work here to avoid disturbing the register state below
2353 // We use c_rarg registers here because we want to use the register used in
2354 // the call to the VM
2355 if (JvmtiExport::can_post_field_access()) {
2356 // Check to see if a field access watch has been set before we
2357 // take the time to call into the VM.
2358 Label L1;
2359 assert_different_registers(cache, index, r0);
2360 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2361 __ ldrw(r0, Address(rscratch1));
2362 __ cbzw(r0, L1);
2363
2364 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2365 __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
2366
2367 if (is_static) {
2368 __ mov(c_rarg1, zr); // NULL object reference
2369 } else {
2370 __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2371 __ verify_oop(c_rarg1);
2372 }
2373 // c_rarg1: object pointer or NULL
2374 // c_rarg2: cache entry pointer
2375 // c_rarg3: jvalue object on the stack
2376 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2377 InterpreterRuntime::post_field_access),
2378 c_rarg1, c_rarg2, c_rarg3);
2379 __ get_cache_and_index_at_bcp(cache, index, 1);
2380 __ bind(L1);
2381 }
2382 }
2383
2384 void TemplateTable::pop_and_check_object(Register r)
2385 {
2386 __ pop_ptr(r);
2387 __ null_check(r); // for field access must check obj.
2388 __ verify_oop(r);
2389 }
2390
2391 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2392 {
2393 const Register cache = r2;
2394 const Register index = r3;
2395 const Register obj = r4;
2396 const Register off = r19;
2397 const Register flags = r0;
2398 const Register raw_flags = r6;
2399 const Register bc = r4; // uses same reg as obj, so don't mix them
2400
2401 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2402 jvmti_post_field_access(cache, index, is_static, false);
2403 load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
2404
2405 if (!is_static) {
2406 // obj is on the stack
2407 pop_and_check_object(obj);
2408 }
2409
2410 // 8179954: We need to make sure that the code generated for
2411 // volatile accesses forms a sequentially-consistent set of
2412 // operations when combined with STLR and LDAR. Without a leading
2413 // membar it's possible for a simple Dekker test to fail if loads
2414 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
2415 // the stores in one method and we interpret the loads in another.
2416 if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){
2417 Label notVolatile;
2418 __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2419 __ membar(MacroAssembler::AnyAny);
2420 __ bind(notVolatile);
2421 }
2422
2423 const Address field(obj, off);
2424
2425 Label Done, notByte, notBool, notInt, notShort, notChar,
2426 notLong, notFloat, notObj, notDouble;
2427
2428 // x86 uses a shift and mask or wings it with a shift plus assert
2429 // the mask is not needed. aarch64 just uses bitfield extract
2430 __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
2431 ConstantPoolCacheEntry::tos_state_bits);
2432
2433 assert(btos == 0, "change code, btos != 0");
2434 __ cbnz(flags, notByte);
2435
2436 // Don't rewrite getstatic, only getfield
2437 if (is_static) rc = may_not_rewrite;
2438
2439 // btos
2440 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2441 __ push(btos);
2442 // Rewrite bytecode to be faster
2443 if (rc == may_rewrite) {
2444 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2445 }
2446 __ b(Done);
2447
2448 __ bind(notByte);
2449 __ cmp(flags, (u1)ztos);
2450 __ br(Assembler::NE, notBool);
2451
2452 // ztos (same code as btos)
2453 __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2454 __ push(ztos);
2455 // Rewrite bytecode to be faster
2456 if (rc == may_rewrite) {
2457 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2458 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2459 }
2460 __ b(Done);
2461
2462 __ bind(notBool);
2463 __ cmp(flags, (u1)atos);
2464 __ br(Assembler::NE, notObj);
2465 // atos
2466 do_oop_load(_masm, field, r0, IN_HEAP);
2467 __ push(atos);
2468 if (rc == may_rewrite) {
2469 patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2470 }
2471 __ b(Done);
2472
2473 __ bind(notObj);
2474 __ cmp(flags, (u1)itos);
2475 __ br(Assembler::NE, notInt);
2476 // itos
2477 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2478 __ push(itos);
2479 // Rewrite bytecode to be faster
2480 if (rc == may_rewrite) {
2481 patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2482 }
2483 __ b(Done);
2484
2485 __ bind(notInt);
2486 __ cmp(flags, (u1)ctos);
2487 __ br(Assembler::NE, notChar);
2488 // ctos
2489 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2490 __ push(ctos);
2491 // Rewrite bytecode to be faster
2492 if (rc == may_rewrite) {
2493 patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2494 }
2495 __ b(Done);
2496
2497 __ bind(notChar);
2498 __ cmp(flags, (u1)stos);
2499 __ br(Assembler::NE, notShort);
2500 // stos
2501 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2502 __ push(stos);
2503 // Rewrite bytecode to be faster
2504 if (rc == may_rewrite) {
2505 patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2506 }
2507 __ b(Done);
2508
2509 __ bind(notShort);
2510 __ cmp(flags, (u1)ltos);
2511 __ br(Assembler::NE, notLong);
2512 // ltos
2513 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2514 __ push(ltos);
2515 // Rewrite bytecode to be faster
2516 if (rc == may_rewrite) {
2517 patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2518 }
2519 __ b(Done);
2520
2521 __ bind(notLong);
2522 __ cmp(flags, (u1)ftos);
2523 __ br(Assembler::NE, notFloat);
2524 // ftos
2525 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2526 __ push(ftos);
2527 // Rewrite bytecode to be faster
2528 if (rc == may_rewrite) {
2529 patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2530 }
2531 __ b(Done);
2532
2533 __ bind(notFloat);
2534 #ifdef ASSERT
2535 __ cmp(flags, (u1)dtos);
2536 __ br(Assembler::NE, notDouble);
2537 #endif
2538 // dtos
2539 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2540 __ push(dtos);
2541 // Rewrite bytecode to be faster
2542 if (rc == may_rewrite) {
2543 patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2544 }
2545 #ifdef ASSERT
2546 __ b(Done);
2547
2548 __ bind(notDouble);
2549 __ stop("Bad state");
2550 #endif
2551
2552 __ bind(Done);
2553
2554 Label notVolatile;
2555 __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2556 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2557 __ bind(notVolatile);
2558 }
2559
2560
2561 void TemplateTable::getfield(int byte_no)
2562 {
2563 getfield_or_static(byte_no, false);
2564 }
2565
2566 void TemplateTable::nofast_getfield(int byte_no) {
2567 getfield_or_static(byte_no, false, may_not_rewrite);
2568 }
2569
2570 void TemplateTable::getstatic(int byte_no)
2571 {
2572 getfield_or_static(byte_no, true);
2573 }
2574
2575 // The registers cache and index expected to be set before call.
2576 // The function may destroy various registers, just not the cache and index registers.
2577 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2578 transition(vtos, vtos);
2579
2580 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2581
2582 if (JvmtiExport::can_post_field_modification()) {
2583 // Check to see if a field modification watch has been set before
2584 // we take the time to call into the VM.
2585 Label L1;
2586 assert_different_registers(cache, index, r0);
2587 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2588 __ ldrw(r0, Address(rscratch1));
2589 __ cbz(r0, L1);
2590
2591 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2592
2593 if (is_static) {
2594 // Life is simple. Null out the object pointer.
2595 __ mov(c_rarg1, zr);
2596 } else {
2597 // Life is harder. The stack holds the value on top, followed by
2598 // the object. We don't know the size of the value, though; it
2599 // could be one or two words depending on its type. As a result,
2600 // we must find the type to determine where the object is.
2601 __ ldrw(c_rarg3, Address(c_rarg2,
2602 in_bytes(cp_base_offset +
2603 ConstantPoolCacheEntry::flags_offset())));
2604 __ lsr(c_rarg3, c_rarg3,
2605 ConstantPoolCacheEntry::tos_state_shift);
2606 ConstantPoolCacheEntry::verify_tos_state_shift();
2607 Label nope2, done, ok;
2608 __ ldr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2609 __ cmpw(c_rarg3, ltos);
2610 __ br(Assembler::EQ, ok);
2611 __ cmpw(c_rarg3, dtos);
2612 __ br(Assembler::NE, nope2);
2613 __ bind(ok);
2614 __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2615 __ bind(nope2);
2616 }
2617 // cache entry pointer
2618 __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2619 // object (tos)
2620 __ mov(c_rarg3, esp);
2621 // c_rarg1: object pointer set up above (NULL if static)
2622 // c_rarg2: cache entry pointer
2623 // c_rarg3: jvalue object on the stack
2624 __ call_VM(noreg,
2625 CAST_FROM_FN_PTR(address,
2626 InterpreterRuntime::post_field_modification),
2627 c_rarg1, c_rarg2, c_rarg3);
2628 __ get_cache_and_index_at_bcp(cache, index, 1);
2629 __ bind(L1);
2630 }
2631 }
2632
2633 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2634 transition(vtos, vtos);
2635
2636 const Register cache = r2;
2637 const Register index = r3;
2638 const Register obj = r2;
2639 const Register off = r19;
2640 const Register flags = r0;
2641 const Register bc = r4;
2642
2643 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2644 jvmti_post_field_mod(cache, index, is_static);
2645 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2646
2647 Label Done;
2648 __ mov(r5, flags);
2649
2650 {
2651 Label notVolatile;
2652 __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2653 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2654 __ bind(notVolatile);
2655 }
2656
2657 // field address
2658 const Address field(obj, off);
2659
2660 Label notByte, notBool, notInt, notShort, notChar,
2661 notLong, notFloat, notObj, notDouble;
2662
2663 // x86 uses a shift and mask or wings it with a shift plus assert
2664 // the mask is not needed. aarch64 just uses bitfield extract
2665 __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
2666
2667 assert(btos == 0, "change code, btos != 0");
2668 __ cbnz(flags, notByte);
2669
2670 // Don't rewrite putstatic, only putfield
2671 if (is_static) rc = may_not_rewrite;
2672
2673 // btos
2674 {
2675 __ pop(btos);
2676 if (!is_static) pop_and_check_object(obj);
2677 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2678 if (rc == may_rewrite) {
2679 patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2680 }
2681 __ b(Done);
2682 }
2683
2684 __ bind(notByte);
2685 __ cmp(flags, (u1)ztos);
2686 __ br(Assembler::NE, notBool);
2687
2688 // ztos
2689 {
2690 __ pop(ztos);
2691 if (!is_static) pop_and_check_object(obj);
2692 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2693 if (rc == may_rewrite) {
2694 patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2695 }
2696 __ b(Done);
2697 }
2698
2699 __ bind(notBool);
2700 __ cmp(flags, (u1)atos);
2701 __ br(Assembler::NE, notObj);
2702
2703 // atos
2704 {
2705 __ pop(atos);
2706 if (!is_static) pop_and_check_object(obj);
2707 // Store into the field
2708 do_oop_store(_masm, field, r0, IN_HEAP);
2709 if (rc == may_rewrite) {
2710 patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2711 }
2712 __ b(Done);
2713 }
2714
2715 __ bind(notObj);
2716 __ cmp(flags, (u1)itos);
2717 __ br(Assembler::NE, notInt);
2718
2719 // itos
2720 {
2721 __ pop(itos);
2722 if (!is_static) pop_and_check_object(obj);
2723 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2724 if (rc == may_rewrite) {
2725 patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2726 }
2727 __ b(Done);
2728 }
2729
2730 __ bind(notInt);
2731 __ cmp(flags, (u1)ctos);
2732 __ br(Assembler::NE, notChar);
2733
2734 // ctos
2735 {
2736 __ pop(ctos);
2737 if (!is_static) pop_and_check_object(obj);
2738 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
2739 if (rc == may_rewrite) {
2740 patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2741 }
2742 __ b(Done);
2743 }
2744
2745 __ bind(notChar);
2746 __ cmp(flags, (u1)stos);
2747 __ br(Assembler::NE, notShort);
2748
2749 // stos
2750 {
2751 __ pop(stos);
2752 if (!is_static) pop_and_check_object(obj);
2753 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
2754 if (rc == may_rewrite) {
2755 patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2756 }
2757 __ b(Done);
2758 }
2759
2760 __ bind(notShort);
2761 __ cmp(flags, (u1)ltos);
2762 __ br(Assembler::NE, notLong);
2763
2764 // ltos
2765 {
2766 __ pop(ltos);
2767 if (!is_static) pop_and_check_object(obj);
2768 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
2769 if (rc == may_rewrite) {
2770 patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2771 }
2772 __ b(Done);
2773 }
2774
2775 __ bind(notLong);
2776 __ cmp(flags, (u1)ftos);
2777 __ br(Assembler::NE, notFloat);
2778
2779 // ftos
2780 {
2781 __ pop(ftos);
2782 if (!is_static) pop_and_check_object(obj);
2783 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
2784 if (rc == may_rewrite) {
2785 patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2786 }
2787 __ b(Done);
2788 }
2789
2790 __ bind(notFloat);
2791 #ifdef ASSERT
2792 __ cmp(flags, (u1)dtos);
2793 __ br(Assembler::NE, notDouble);
2794 #endif
2795
2796 // dtos
2797 {
2798 __ pop(dtos);
2799 if (!is_static) pop_and_check_object(obj);
2800 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
2801 if (rc == may_rewrite) {
2802 patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2803 }
2804 }
2805
2806 #ifdef ASSERT
2807 __ b(Done);
2808
2809 __ bind(notDouble);
2810 __ stop("Bad state");
2811 #endif
2812
2813 __ bind(Done);
2814
2815 {
2816 Label notVolatile;
2817 __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2818 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2819 __ bind(notVolatile);
2820 }
2821 }
2822
2823 void TemplateTable::putfield(int byte_no)
2824 {
2825 putfield_or_static(byte_no, false);
2826 }
2827
2828 void TemplateTable::nofast_putfield(int byte_no) {
2829 putfield_or_static(byte_no, false, may_not_rewrite);
2830 }
2831
2832 void TemplateTable::putstatic(int byte_no) {
2833 putfield_or_static(byte_no, true);
2834 }
2835
2836 void TemplateTable::jvmti_post_fast_field_mod()
2837 {
2838 if (JvmtiExport::can_post_field_modification()) {
2839 // Check to see if a field modification watch has been set before
2840 // we take the time to call into the VM.
2841 Label L2;
2842 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2843 __ ldrw(c_rarg3, Address(rscratch1));
2844 __ cbzw(c_rarg3, L2);
2845 __ pop_ptr(r19); // copy the object pointer from tos
2846 __ verify_oop(r19);
2847 __ push_ptr(r19); // put the object pointer back on tos
2848 // Save tos values before call_VM() clobbers them. Since we have
2849 // to do it for every data type, we use the saved values as the
2850 // jvalue object.
2851 switch (bytecode()) { // load values into the jvalue object
2852 case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2853 case Bytecodes::_fast_bputfield: // fall through
2854 case Bytecodes::_fast_zputfield: // fall through
2855 case Bytecodes::_fast_sputfield: // fall through
2856 case Bytecodes::_fast_cputfield: // fall through
2857 case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2858 case Bytecodes::_fast_dputfield: __ push_d(); break;
2859 case Bytecodes::_fast_fputfield: __ push_f(); break;
2860 case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2861
2862 default:
2863 ShouldNotReachHere();
2864 }
2865 __ mov(c_rarg3, esp); // points to jvalue on the stack
2866 // access constant pool cache entry
2867 __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2868 __ verify_oop(r19);
2869 // r19: object pointer copied above
2870 // c_rarg2: cache entry pointer
2871 // c_rarg3: jvalue object on the stack
2872 __ call_VM(noreg,
2873 CAST_FROM_FN_PTR(address,
2874 InterpreterRuntime::post_field_modification),
2875 r19, c_rarg2, c_rarg3);
2876
2877 switch (bytecode()) { // restore tos values
2878 case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2879 case Bytecodes::_fast_bputfield: // fall through
2880 case Bytecodes::_fast_zputfield: // fall through
2881 case Bytecodes::_fast_sputfield: // fall through
2882 case Bytecodes::_fast_cputfield: // fall through
2883 case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2884 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2885 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2886 case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2887 default: break;
2888 }
2889 __ bind(L2);
2890 }
2891 }
2892
2893 void TemplateTable::fast_storefield(TosState state)
2894 {
2895 transition(state, vtos);
2896
2897 ByteSize base = ConstantPoolCache::base_offset();
2898
2899 jvmti_post_fast_field_mod();
2900
2901 // access constant pool cache
2902 __ get_cache_and_index_at_bcp(r2, r1, 1);
2903
2904 // Must prevent reordering of the following cp cache loads with bytecode load
2905 __ membar(MacroAssembler::LoadLoad);
2906
2907 // test for volatile with r3
2908 __ ldrw(r3, Address(r2, in_bytes(base +
2909 ConstantPoolCacheEntry::flags_offset())));
2910
2911 // replace index with field offset from cache entry
2912 __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2913
2914 {
2915 Label notVolatile;
2916 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2917 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2918 __ bind(notVolatile);
2919 }
2920
2921 Label notVolatile;
2922
2923 // Get object from stack
2924 pop_and_check_object(r2);
2925
2926 // field address
2927 const Address field(r2, r1);
2928
2929 // access field
2930 switch (bytecode()) {
2931 case Bytecodes::_fast_aputfield:
2932 do_oop_store(_masm, field, r0, IN_HEAP);
2933 break;
2934 case Bytecodes::_fast_lputfield:
2935 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
2936 break;
2937 case Bytecodes::_fast_iputfield:
2938 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2939 break;
2940 case Bytecodes::_fast_zputfield:
2941 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2942 break;
2943 case Bytecodes::_fast_bputfield:
2944 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2945 break;
2946 case Bytecodes::_fast_sputfield:
2947 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
2948 break;
2949 case Bytecodes::_fast_cputfield:
2950 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
2951 break;
2952 case Bytecodes::_fast_fputfield:
2953 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
2954 break;
2955 case Bytecodes::_fast_dputfield:
2956 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
2957 break;
2958 default:
2959 ShouldNotReachHere();
2960 }
2961
2962 {
2963 Label notVolatile;
2964 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2965 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2966 __ bind(notVolatile);
2967 }
2968 }
2969
2970
2971 void TemplateTable::fast_accessfield(TosState state)
2972 {
2973 transition(atos, state);
2974 // Do the JVMTI work here to avoid disturbing the register state below
2975 if (JvmtiExport::can_post_field_access()) {
2976 // Check to see if a field access watch has been set before we
2977 // take the time to call into the VM.
2978 Label L1;
2979 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2980 __ ldrw(r2, Address(rscratch1));
2981 __ cbzw(r2, L1);
2982 // access constant pool cache entry
2983 __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1);
2984 __ verify_oop(r0);
2985 __ push_ptr(r0); // save object pointer before call_VM() clobbers it
2986 __ mov(c_rarg1, r0);
2987 // c_rarg1: object pointer copied above
2988 // c_rarg2: cache entry pointer
2989 __ call_VM(noreg,
2990 CAST_FROM_FN_PTR(address,
2991 InterpreterRuntime::post_field_access),
2992 c_rarg1, c_rarg2);
2993 __ pop_ptr(r0); // restore object pointer
2994 __ bind(L1);
2995 }
2996
2997 // access constant pool cache
2998 __ get_cache_and_index_at_bcp(r2, r1, 1);
2999
3000 // Must prevent reordering of the following cp cache loads with bytecode load
3001 __ membar(MacroAssembler::LoadLoad);
3002
3003 __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3004 ConstantPoolCacheEntry::f2_offset())));
3005 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3006 ConstantPoolCacheEntry::flags_offset())));
3007
3008 // r0: object
3009 __ verify_oop(r0);
3010 __ null_check(r0);
3011 const Address field(r0, r1);
3012
3013 // 8179954: We need to make sure that the code generated for
3014 // volatile accesses forms a sequentially-consistent set of
3015 // operations when combined with STLR and LDAR. Without a leading
3016 // membar it's possible for a simple Dekker test to fail if loads
3017 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3018 // the stores in one method and we interpret the loads in another.
3019 if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3020 Label notVolatile;
3021 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3022 __ membar(MacroAssembler::AnyAny);
3023 __ bind(notVolatile);
3024 }
3025
3026 // access field
3027 switch (bytecode()) {
3028 case Bytecodes::_fast_agetfield:
3029 do_oop_load(_masm, field, r0, IN_HEAP);
3030 __ verify_oop(r0);
3031 break;
3032 case Bytecodes::_fast_lgetfield:
3033 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3034 break;
3035 case Bytecodes::_fast_igetfield:
3036 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3037 break;
3038 case Bytecodes::_fast_bgetfield:
3039 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3040 break;
3041 case Bytecodes::_fast_sgetfield:
3042 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3043 break;
3044 case Bytecodes::_fast_cgetfield:
3045 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3046 break;
3047 case Bytecodes::_fast_fgetfield:
3048 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3049 break;
3050 case Bytecodes::_fast_dgetfield:
3051 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3052 break;
3053 default:
3054 ShouldNotReachHere();
3055 }
3056 {
3057 Label notVolatile;
3058 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3059 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3060 __ bind(notVolatile);
3061 }
3062 }
3063
3064 void TemplateTable::fast_xaccess(TosState state)
3065 {
3066 transition(vtos, state);
3067
3068 // get receiver
3069 __ ldr(r0, aaddress(0));
3070 // access constant pool cache
3071 __ get_cache_and_index_at_bcp(r2, r3, 2);
3072 __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3073 ConstantPoolCacheEntry::f2_offset())));
3074
3075 // 8179954: We need to make sure that the code generated for
3076 // volatile accesses forms a sequentially-consistent set of
3077 // operations when combined with STLR and LDAR. Without a leading
3078 // membar it's possible for a simple Dekker test to fail if loads
3079 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3080 // the stores in one method and we interpret the loads in another.
3081 if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3082 Label notVolatile;
3083 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3084 ConstantPoolCacheEntry::flags_offset())));
3085 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3086 __ membar(MacroAssembler::AnyAny);
3087 __ bind(notVolatile);
3088 }
3089
3090 // make sure exception is reported in correct bcp range (getfield is
3091 // next instruction)
3092 __ increment(rbcp);
3093 __ null_check(r0);
3094 switch (state) {
3095 case itos:
3096 __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3097 break;
3098 case atos:
3099 do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3100 __ verify_oop(r0);
3101 break;
3102 case ftos:
3103 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3104 break;
3105 default:
3106 ShouldNotReachHere();
3107 }
3108
3109 {
3110 Label notVolatile;
3111 __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3112 ConstantPoolCacheEntry::flags_offset())));
3113 __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3114 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3115 __ bind(notVolatile);
3116 }
3117
3118 __ decrement(rbcp);
3119 }
3120
3121
3122
3123 //-----------------------------------------------------------------------------
3124 // Calls
3125
3126 void TemplateTable::prepare_invoke(int byte_no,
3127 Register method, // linked method (or i-klass)
3128 Register index, // itable index, MethodType, etc.
3129 Register recv, // if caller wants to see it
3130 Register flags // if caller wants to test it
3131 ) {
3132 // determine flags
3133 Bytecodes::Code code = bytecode();
3134 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3135 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3136 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3137 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3138 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3139 const bool load_receiver = (recv != noreg);
3140 const bool save_flags = (flags != noreg);
3141 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3142 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3143 assert(flags == noreg || flags == r3, "");
3144 assert(recv == noreg || recv == r2, "");
3145
3146 // setup registers & access constant pool cache
3147 if (recv == noreg) recv = r2;
3148 if (flags == noreg) flags = r3;
3149 assert_different_registers(method, index, recv, flags);
3150
3151 // save 'interpreter return address'
3152 __ save_bcp();
3153
3154 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3155
3156 // maybe push appendix to arguments (just before return address)
3157 if (is_invokedynamic || is_invokehandle) {
3158 Label L_no_push;
3159 __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3160 // Push the appendix as a trailing parameter.
3161 // This must be done before we get the receiver,
3162 // since the parameter_size includes it.
3163 __ push(r19);
3164 __ mov(r19, index);
3165 __ load_resolved_reference_at_index(index, r19);
3166 __ pop(r19);
3167 __ push(index); // push appendix (MethodType, CallSite, etc.)
3168 __ bind(L_no_push);
3169 }
3170
3171 // load receiver if needed (note: no return address pushed yet)
3172 if (load_receiver) {
3173 __ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
3174 // FIXME -- is this actually correct? looks like it should be 2
3175 // const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3176 // const int receiver_is_at_end = -1; // back off one slot to get receiver
3177 // Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3178 // __ movptr(recv, recv_addr);
3179 __ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
3180 __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
3181 __ verify_oop(recv);
3182 }
3183
3184 // compute return type
3185 // x86 uses a shift and mask or wings it with a shift plus assert
3186 // the mask is not needed. aarch64 just uses bitfield extract
3187 __ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
3188 // load return address
3189 {
3190 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3191 __ mov(rscratch1, table_addr);
3192 __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3193 }
3194 }
3195
3196
3197 void TemplateTable::invokevirtual_helper(Register index,
3198 Register recv,
3199 Register flags)
3200 {
3201 // Uses temporary registers r0, r3
3202 assert_different_registers(index, recv, r0, r3);
3203 // Test for an invoke of a final method
3204 Label notFinal;
3205 __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3206
3207 const Register method = index; // method must be rmethod
3208 assert(method == rmethod,
3209 "Method must be rmethod for interpreter calling convention");
3210
3211 // do the call - the index is actually the method to call
3212 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3213
3214 // It's final, need a null check here!
3215 __ null_check(recv);
3216
3217 // profile this call
3218 __ profile_final_call(r0);
3219 __ profile_arguments_type(r0, method, r4, true);
3220
3221 __ jump_from_interpreted(method, r0);
3222
3223 __ bind(notFinal);
3224
3225 // get receiver klass
3226 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3227 __ load_klass(r0, recv);
3228
3229 // profile this call
3230 __ profile_virtual_call(r0, rlocals, r3);
3231
3232 // get target Method & entry point
3233 __ lookup_virtual_method(r0, index, method);
3234 __ profile_arguments_type(r3, method, r4, true);
3235 // FIXME -- this looks completely redundant. is it?
3236 // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3237 __ jump_from_interpreted(method, r3);
3238 }
3239
3240 void TemplateTable::invokevirtual(int byte_no)
3241 {
3242 transition(vtos, vtos);
3243 assert(byte_no == f2_byte, "use this argument");
3244
3245 prepare_invoke(byte_no, rmethod, noreg, r2, r3);
3246
3247 // rmethod: index (actually a Method*)
3248 // r2: receiver
3249 // r3: flags
3250
3251 invokevirtual_helper(rmethod, r2, r3);
3252 }
3253
3254 void TemplateTable::invokespecial(int byte_no)
3255 {
3256 transition(vtos, vtos);
3257 assert(byte_no == f1_byte, "use this argument");
3258
3259 prepare_invoke(byte_no, rmethod, noreg, // get f1 Method*
3260 r2); // get receiver also for null check
3261 __ verify_oop(r2);
3262 __ null_check(r2);
3263 // do the call
3264 __ profile_call(r0);
3265 __ profile_arguments_type(r0, rmethod, rbcp, false);
3266 __ jump_from_interpreted(rmethod, r0);
3267 }
3268
3269 void TemplateTable::invokestatic(int byte_no)
3270 {
3271 transition(vtos, vtos);
3272 assert(byte_no == f1_byte, "use this argument");
3273
3274 prepare_invoke(byte_no, rmethod); // get f1 Method*
3275 // do the call
3276 __ profile_call(r0);
3277 __ profile_arguments_type(r0, rmethod, r4, false);
3278 __ jump_from_interpreted(rmethod, r0);
3279 }
3280
3281 void TemplateTable::fast_invokevfinal(int byte_no)
3282 {
3283 __ call_Unimplemented();
3284 }
3285
3286 void TemplateTable::invokeinterface(int byte_no) {
3287 transition(vtos, vtos);
3288 assert(byte_no == f1_byte, "use this argument");
3289
3290 prepare_invoke(byte_no, r0, rmethod, // get f1 Klass*, f2 Method*
3291 r2, r3); // recv, flags
3292
3293 // r0: interface klass (from f1)
3294 // rmethod: method (from f2)
3295 // r2: receiver
3296 // r3: flags
3297
3298 // First check for Object case, then private interface method,
3299 // then regular interface method.
3300
3301 // Special case of invokeinterface called for virtual method of
3302 // java.lang.Object. See cpCache.cpp for details.
3303 Label notObjectMethod;
3304 __ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3305
3306 invokevirtual_helper(rmethod, r2, r3);
3307 __ bind(notObjectMethod);
3308
3309 Label no_such_interface;
3310
3311 // Check for private method invocation - indicated by vfinal
3312 Label notVFinal;
3313 __ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3314
3315 // Get receiver klass into r3 - also a null check
3316 __ null_check(r2, oopDesc::klass_offset_in_bytes());
3317 __ load_klass(r3, r2);
3318
3319 Label subtype;
3320 __ check_klass_subtype(r3, r0, r4, subtype);
3321 // If we get here the typecheck failed
3322 __ b(no_such_interface);
3323 __ bind(subtype);
3324
3325 __ profile_final_call(r0);
3326 __ profile_arguments_type(r0, rmethod, r4, true);
3327 __ jump_from_interpreted(rmethod, r0);
3328
3329 __ bind(notVFinal);
3330
3331 // Get receiver klass into r3 - also a null check
3332 __ restore_locals();
3333 __ null_check(r2, oopDesc::klass_offset_in_bytes());
3334 __ load_klass(r3, r2);
3335
3336 Label no_such_method;
3337
3338 // Preserve method for throw_AbstractMethodErrorVerbose.
3339 __ mov(r16, rmethod);
3340 // Receiver subtype check against REFC.
3341 // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3342 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3343 r3, r0, noreg,
3344 // outputs: scan temp. reg, scan temp. reg
3345 rscratch2, r13,
3346 no_such_interface,
3347 /*return_method=*/false);
3348
3349 // profile this call
3350 __ profile_virtual_call(r3, r13, r19);
3351
3352 // Get declaring interface class from method, and itable index
3353
3354 __ load_method_holder(r0, rmethod);
3355 __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3356 __ subw(rmethod, rmethod, Method::itable_index_max);
3357 __ negw(rmethod, rmethod);
3358
3359 // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3360 __ mov(rlocals, r3);
3361 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3362 rlocals, r0, rmethod,
3363 // outputs: method, scan temp. reg
3364 rmethod, r13,
3365 no_such_interface);
3366
3367 // rmethod,: Method to call
3368 // r2: receiver
3369 // Check for abstract method error
3370 // Note: This should be done more efficiently via a throw_abstract_method_error
3371 // interpreter entry point and a conditional jump to it in case of a null
3372 // method.
3373 __ cbz(rmethod, no_such_method);
3374
3375 __ profile_arguments_type(r3, rmethod, r13, true);
3376
3377 // do the call
3378 // r2: receiver
3379 // rmethod,: Method
3380 __ jump_from_interpreted(rmethod, r3);
3381 __ should_not_reach_here();
3382
3383 // exception handling code follows...
3384 // note: must restore interpreter registers to canonical
3385 // state for exception handling to work correctly!
3386
3387 __ bind(no_such_method);
3388 // throw exception
3389 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3390 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3391 // Pass arguments for generating a verbose error message.
3392 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3393 // the call_VM checks for exception, so we should never return here.
3394 __ should_not_reach_here();
3395
3396 __ bind(no_such_interface);
3397 // throw exception
3398 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3399 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3400 // Pass arguments for generating a verbose error message.
3401 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3402 InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3403 // the call_VM checks for exception, so we should never return here.
3404 __ should_not_reach_here();
3405 return;
3406 }
3407
3408 void TemplateTable::invokehandle(int byte_no) {
3409 transition(vtos, vtos);
3410 assert(byte_no == f1_byte, "use this argument");
3411
3412 prepare_invoke(byte_no, rmethod, r0, r2);
3413 __ verify_method_ptr(r2);
3414 __ verify_oop(r2);
3415 __ null_check(r2);
3416
3417 // FIXME: profile the LambdaForm also
3418
3419 // r13 is safe to use here as a scratch reg because it is about to
3420 // be clobbered by jump_from_interpreted().
3421 __ profile_final_call(r13);
3422 __ profile_arguments_type(r13, rmethod, r4, true);
3423
3424 __ jump_from_interpreted(rmethod, r0);
3425 }
3426
3427 void TemplateTable::invokedynamic(int byte_no) {
3428 transition(vtos, vtos);
3429 assert(byte_no == f1_byte, "use this argument");
3430
3431 prepare_invoke(byte_no, rmethod, r0);
3432
3433 // r0: CallSite object (from cpool->resolved_references[])
3434 // rmethod: MH.linkToCallSite method (from f2)
3435
3436 // Note: r0_callsite is already pushed by prepare_invoke
3437
3438 // %%% should make a type profile for any invokedynamic that takes a ref argument
3439 // profile this call
3440 __ profile_call(rbcp);
3441 __ profile_arguments_type(r3, rmethod, r13, false);
3442
3443 __ verify_oop(r0);
3444
3445 __ jump_from_interpreted(rmethod, r0);
3446 }
3447
3448
3449 //-----------------------------------------------------------------------------
3450 // Allocation
3451
3452 void TemplateTable::_new() {
3453 transition(vtos, atos);
3454
3455 __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3456 Label slow_case;
3457 Label done;
3458 Label initialize_header;
3459 Label initialize_object; // including clearing the fields
3460
3461 __ get_cpool_and_tags(r4, r0);
3462 // Make sure the class we're about to instantiate has been resolved.
3463 // This is done before loading InstanceKlass to be consistent with the order
3464 // how Constant Pool is updated (see ConstantPool::klass_at_put)
3465 const int tags_offset = Array<u1>::base_offset_in_bytes();
3466 __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3467 __ lea(rscratch1, Address(rscratch1, tags_offset));
3468 __ ldarb(rscratch1, rscratch1);
3469 __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
3470 __ br(Assembler::NE, slow_case);
3471
3472 // get InstanceKlass
3473 __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
3474
3475 // make sure klass is initialized & doesn't have finalizer
3476 // make sure klass is fully initialized
3477 __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
3478 __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
3479 __ br(Assembler::NE, slow_case);
3480
3481 // get instance_size in InstanceKlass (scaled to a count of bytes)
3482 __ ldrw(r3,
3483 Address(r4,
3484 Klass::layout_helper_offset()));
3485 // test to see if it has a finalizer or is malformed in some way
3486 __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3487
3488 // Allocate the instance:
3489 // If TLAB is enabled:
3490 // Try to allocate in the TLAB.
3491 // If fails, go to the slow path.
3492 // Else If inline contiguous allocations are enabled:
3493 // Try to allocate in eden.
3494 // If fails due to heap end, go to slow path.
3495 //
3496 // If TLAB is enabled OR inline contiguous is enabled:
3497 // Initialize the allocation.
3498 // Exit.
3499 //
3500 // Go to slow path.
3501 const bool allow_shared_alloc =
3502 Universe::heap()->supports_inline_contig_alloc();
3503
3504 if (UseTLAB) {
3505 __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
3506
3507 if (ZeroTLAB) {
3508 // the fields have been already cleared
3509 __ b(initialize_header);
3510 } else {
3511 // initialize both the header and fields
3512 __ b(initialize_object);
3513 }
3514 } else {
3515 // Allocation in the shared Eden, if allowed.
3516 //
3517 // r3: instance size in bytes
3518 if (allow_shared_alloc) {
3519 __ eden_allocate(r0, r3, 0, r10, slow_case);
3520 }
3521 }
3522
3523 // If UseTLAB or allow_shared_alloc are true, the object is created above and
3524 // there is an initialize need. Otherwise, skip and go to the slow path.
3525 if (UseTLAB || allow_shared_alloc) {
3526 // The object is initialized before the header. If the object size is
3527 // zero, go directly to the header initialization.
3528 __ bind(initialize_object);
3529 __ sub(r3, r3, sizeof(oopDesc));
3530 __ cbz(r3, initialize_header);
3531
3532 // Initialize object fields
3533 {
3534 __ add(r2, r0, sizeof(oopDesc));
3535 Label loop;
3536 __ bind(loop);
3537 __ str(zr, Address(__ post(r2, BytesPerLong)));
3538 __ sub(r3, r3, BytesPerLong);
3539 __ cbnz(r3, loop);
3540 }
3541
3542 // initialize object header only.
3543 __ bind(initialize_header);
3544 if (UseBiasedLocking) {
3545 __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
3546 } else {
3547 __ mov(rscratch1, (intptr_t)markWord::prototype().value());
3548 }
3549 __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
3550 __ store_klass_gap(r0, zr); // zero klass gap for compressed oops
3551 __ store_klass(r0, r4); // store klass last
3552
3553 {
3554 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3555 // Trigger dtrace event for fastpath
3556 __ push(atos); // save the return value
3557 __ call_VM_leaf(
3558 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3559 __ pop(atos); // restore the return value
3560
3561 }
3562 __ b(done);
3563 }
3564
3565 // slow case
3566 __ bind(slow_case);
3567 __ get_constant_pool(c_rarg1);
3568 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3569 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3570 __ verify_oop(r0);
3571
3572 // continue
3573 __ bind(done);
3574 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3575 __ membar(Assembler::StoreStore);
3576 }
3577
3578 void TemplateTable::newarray() {
3579 transition(itos, atos);
3580 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3581 __ mov(c_rarg2, r0);
3582 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3583 c_rarg1, c_rarg2);
3584 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3585 __ membar(Assembler::StoreStore);
3586 }
3587
3588 void TemplateTable::anewarray() {
3589 transition(itos, atos);
3590 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3591 __ get_constant_pool(c_rarg1);
3592 __ mov(c_rarg3, r0);
3593 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3594 c_rarg1, c_rarg2, c_rarg3);
3595 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3596 __ membar(Assembler::StoreStore);
3597 }
3598
3599 void TemplateTable::arraylength() {
3600 transition(atos, itos);
3601 __ null_check(r0, arrayOopDesc::length_offset_in_bytes());
3602 __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3603 }
3604
3605 void TemplateTable::checkcast()
3606 {
3607 transition(atos, atos);
3608 Label done, is_null, ok_is_subtype, quicked, resolved;
3609 __ cbz(r0, is_null);
3610
3611 // Get cpool & tags index
3612 __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3613 __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3614 // See if bytecode has already been quicked
3615 __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3616 __ lea(r1, Address(rscratch1, r19));
3617 __ ldarb(r1, r1);
3618 __ cmp(r1, (u1)JVM_CONSTANT_Class);
3619 __ br(Assembler::EQ, quicked);
3620
3621 __ push(atos); // save receiver for result, and for GC
3622 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3623 // vm_result_2 has metadata result
3624 __ get_vm_result_2(r0, rthread);
3625 __ pop(r3); // restore receiver
3626 __ b(resolved);
3627
3628 // Get superklass in r0 and subklass in r3
3629 __ bind(quicked);
3630 __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3631 __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3632
3633 __ bind(resolved);
3634 __ load_klass(r19, r3);
3635
3636 // Generate subtype check. Blows r2, r5. Object in r3.
3637 // Superklass in r0. Subklass in r19.
3638 __ gen_subtype_check(r19, ok_is_subtype);
3639
3640 // Come here on failure
3641 __ push(r3);
3642 // object is at TOS
3643 __ b(Interpreter::_throw_ClassCastException_entry);
3644
3645 // Come here on success
3646 __ bind(ok_is_subtype);
3647 __ mov(r0, r3); // Restore object in r3
3648
3649 // Collect counts on whether this test sees NULLs a lot or not.
3650 if (ProfileInterpreter) {
3651 __ b(done);
3652 __ bind(is_null);
3653 __ profile_null_seen(r2);
3654 } else {
3655 __ bind(is_null); // same as 'done'
3656 }
3657 __ bind(done);
3658 }
3659
3660 void TemplateTable::instanceof() {
3661 transition(atos, itos);
3662 Label done, is_null, ok_is_subtype, quicked, resolved;
3663 __ cbz(r0, is_null);
3664
3665 // Get cpool & tags index
3666 __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3667 __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3668 // See if bytecode has already been quicked
3669 __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3670 __ lea(r1, Address(rscratch1, r19));
3671 __ ldarb(r1, r1);
3672 __ cmp(r1, (u1)JVM_CONSTANT_Class);
3673 __ br(Assembler::EQ, quicked);
3674
3675 __ push(atos); // save receiver for result, and for GC
3676 call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3677 // vm_result_2 has metadata result
3678 __ get_vm_result_2(r0, rthread);
3679 __ pop(r3); // restore receiver
3680 __ verify_oop(r3);
3681 __ load_klass(r3, r3);
3682 __ b(resolved);
3683
3684 // Get superklass in r0 and subklass in r3
3685 __ bind(quicked);
3686 __ load_klass(r3, r0);
3687 __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
3688
3689 __ bind(resolved);
3690
3691 // Generate subtype check. Blows r2, r5
3692 // Superklass in r0. Subklass in r3.
3693 __ gen_subtype_check(r3, ok_is_subtype);
3694
3695 // Come here on failure
3696 __ mov(r0, 0);
3697 __ b(done);
3698 // Come here on success
3699 __ bind(ok_is_subtype);
3700 __ mov(r0, 1);
3701
3702 // Collect counts on whether this test sees NULLs a lot or not.
3703 if (ProfileInterpreter) {
3704 __ b(done);
3705 __ bind(is_null);
3706 __ profile_null_seen(r2);
3707 } else {
3708 __ bind(is_null); // same as 'done'
3709 }
3710 __ bind(done);
3711 // r0 = 0: obj == NULL or obj is not an instanceof the specified klass
3712 // r0 = 1: obj != NULL and obj is an instanceof the specified klass
3713 }
3714
3715 //-----------------------------------------------------------------------------
3716 // Breakpoints
3717 void TemplateTable::_breakpoint() {
3718 // Note: We get here even if we are single stepping..
3719 // jbug inists on setting breakpoints at every bytecode
3720 // even if we are in single step mode.
3721
3722 transition(vtos, vtos);
3723
3724 // get the unpatched byte code
3725 __ get_method(c_rarg1);
3726 __ call_VM(noreg,
3727 CAST_FROM_FN_PTR(address,
3728 InterpreterRuntime::get_original_bytecode_at),
3729 c_rarg1, rbcp);
3730 __ mov(r19, r0);
3731
3732 // post the breakpoint event
3733 __ call_VM(noreg,
3734 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3735 rmethod, rbcp);
3736
3737 // complete the execution of original bytecode
3738 __ mov(rscratch1, r19);
3739 __ dispatch_only_normal(vtos);
3740 }
3741
3742 //-----------------------------------------------------------------------------
3743 // Exceptions
3744
3745 void TemplateTable::athrow() {
3746 transition(atos, vtos);
3747 __ null_check(r0);
3748 __ b(Interpreter::throw_exception_entry());
3749 }
3750
3751 //-----------------------------------------------------------------------------
3752 // Synchronization
3753 //
3754 // Note: monitorenter & exit are symmetric routines; which is reflected
3755 // in the assembly code structure as well
3756 //
3757 // Stack layout:
3758 //
3759 // [expressions ] <--- esp = expression stack top
3760 // ..
3761 // [expressions ]
3762 // [monitor entry] <--- monitor block top = expression stack bot
3763 // ..
3764 // [monitor entry]
3765 // [frame data ] <--- monitor block bot
3766 // ...
3767 // [saved rbp ] <--- rbp
3768 void TemplateTable::monitorenter()
3769 {
3770 transition(atos, vtos);
3771
3772 // check for NULL object
3773 __ null_check(r0);
3774
3775 const Address monitor_block_top(
3776 rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3777 const Address monitor_block_bot(
3778 rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3779 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3780
3781 Label allocated;
3782
3783 // initialize entry pointer
3784 __ mov(c_rarg1, zr); // points to free slot or NULL
3785
3786 // find a free slot in the monitor block (result in c_rarg1)
3787 {
3788 Label entry, loop, exit;
3789 __ ldr(c_rarg3, monitor_block_top); // points to current entry,
3790 // starting with top-most entry
3791 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3792
3793 __ b(entry);
3794
3795 __ bind(loop);
3796 // check if current entry is used
3797 // if not used then remember entry in c_rarg1
3798 __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3799 __ cmp(zr, rscratch1);
3800 __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
3801 // check if current entry is for same object
3802 __ cmp(r0, rscratch1);
3803 // if same object then stop searching
3804 __ br(Assembler::EQ, exit);
3805 // otherwise advance to next entry
3806 __ add(c_rarg3, c_rarg3, entry_size);
3807 __ bind(entry);
3808 // check if bottom reached
3809 __ cmp(c_rarg3, c_rarg2);
3810 // if not at bottom then check this entry
3811 __ br(Assembler::NE, loop);
3812 __ bind(exit);
3813 }
3814
3815 __ cbnz(c_rarg1, allocated); // check if a slot has been found and
3816 // if found, continue with that on
3817
3818 // allocate one if there's no free slot
3819 {
3820 Label entry, loop;
3821 // 1. compute new pointers // rsp: old expression stack top
3822 __ ldr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3823 __ sub(esp, esp, entry_size); // move expression stack top
3824 __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
3825 __ mov(c_rarg3, esp); // set start value for copy loop
3826 __ str(c_rarg1, monitor_block_bot); // set new monitor block bottom
3827
3828 __ sub(sp, sp, entry_size); // make room for the monitor
3829
3830 __ b(entry);
3831 // 2. move expression stack contents
3832 __ bind(loop);
3833 __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3834 // word from old location
3835 __ str(c_rarg2, Address(c_rarg3, 0)); // and store it at new location
3836 __ add(c_rarg3, c_rarg3, wordSize); // advance to next word
3837 __ bind(entry);
3838 __ cmp(c_rarg3, c_rarg1); // check if bottom reached
3839 __ br(Assembler::NE, loop); // if not at bottom then
3840 // copy next word
3841 }
3842
3843 // call run-time routine
3844 // c_rarg1: points to monitor entry
3845 __ bind(allocated);
3846
3847 // Increment bcp to point to the next bytecode, so exception
3848 // handling for async. exceptions work correctly.
3849 // The object has already been poped from the stack, so the
3850 // expression stack looks correct.
3851 __ increment(rbcp);
3852
3853 // store object
3854 __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3855 __ lock_object(c_rarg1);
3856
3857 // check to make sure this monitor doesn't cause stack overflow after locking
3858 __ save_bcp(); // in case of exception
3859 __ generate_stack_overflow_check(0);
3860
3861 // The bcp has already been incremented. Just need to dispatch to
3862 // next instruction.
3863 __ dispatch_next(vtos);
3864 }
3865
3866
3867 void TemplateTable::monitorexit()
3868 {
3869 transition(atos, vtos);
3870
3871 // check for NULL object
3872 __ null_check(r0);
3873
3874 const Address monitor_block_top(
3875 rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3876 const Address monitor_block_bot(
3877 rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3878 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3879
3880 Label found;
3881
3882 // find matching slot
3883 {
3884 Label entry, loop;
3885 __ ldr(c_rarg1, monitor_block_top); // points to current entry,
3886 // starting with top-most entry
3887 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3888 // of monitor block
3889 __ b(entry);
3890
3891 __ bind(loop);
3892 // check if current entry is for same object
3893 __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3894 __ cmp(r0, rscratch1);
3895 // if same object then stop searching
3896 __ br(Assembler::EQ, found);
3897 // otherwise advance to next entry
3898 __ add(c_rarg1, c_rarg1, entry_size);
3899 __ bind(entry);
3900 // check if bottom reached
3901 __ cmp(c_rarg1, c_rarg2);
3902 // if not at bottom then check this entry
3903 __ br(Assembler::NE, loop);
3904 }
3905
3906 // error handling. Unlocking was not block-structured
3907 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3908 InterpreterRuntime::throw_illegal_monitor_state_exception));
3909 __ should_not_reach_here();
3910
3911 // call run-time routine
3912 __ bind(found);
3913 __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
3914 __ unlock_object(c_rarg1);
3915 __ pop_ptr(r0); // discard object
3916 }
3917
3918
3919 // Wide instructions
3920 void TemplateTable::wide()
3921 {
3922 __ load_unsigned_byte(r19, at_bcp(1));
3923 __ mov(rscratch1, (address)Interpreter::_wentry_point);
3924 __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
3925 __ br(rscratch1);
3926 }
3927
3928
3929 // Multi arrays
3930 void TemplateTable::multianewarray() {
3931 transition(vtos, atos);
3932 __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
3933 // last dim is on top of stack; we want address of first one:
3934 // first_addr = last_addr + (ndims - 1) * wordSize
3935 __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
3936 __ sub(c_rarg1, c_rarg1, wordSize);
3937 call_VM(r0,
3938 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3939 c_rarg1);
3940 __ load_unsigned_byte(r1, at_bcp(3));
3941 __ lea(esp, Address(esp, r1, Address::uxtw(3)));
3942 }