/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(esp, 0);
}

// At the top of the Java expression stack, which may be different from
// esp(); it isn't for category 1 values.
static inline Address at_tos() {
  return Address(esp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp, Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp, Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp, Interpreter::expr_offset_in_bytes(5));
}
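
// A sketch of the expression-stack addressing above (an illustrative
// assumption drawn from Interpreter::expr_offset_in_bytes(), not
// generated code): esp points at the top-of-stack slot and deeper slots
// sit at higher addresses, one word apart, so
//
//   at_tos()    == esp + 0 * Interpreter::stackElementSize
//   at_tos_p1() == esp + 1 * Interpreter::stackElementSize
//   ...
//   at_tos_p5() == esp + 5 * Interpreter::stackElementSize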

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}


// Miscellaneous helper routines
// Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldarb(temp_reg, temp_reg);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done); // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}
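
// An illustrative rewrite performed by patch_bytecode above (an assumed
// example, not generated code): once the field entry is resolved,
//
//   putfield #12         // Bytecodes::_putfield
//
// is overwritten in the bytecode stream with
//
//   fast_aputfield #12   // Bytecodes::_fast_aputfield
//
// so subsequent executions dispatch directly to the quickened template
// instead of re-resolving the constant pool entry.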


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}
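
// A worked example of the sipush decoding above (illustrative, not
// generated code): the operand bytes are big-endian in the stream, so
// for sipush 0x1234 the little-endian ldrh yields 0x3412, revw gives
// 0x12340000, and asrw by 16 produces 0x00001234; a negative short such
// as 0xff00 sign-extends to 0xffffff00 the same way.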

void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0); // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCache::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask or wings it with a shift plus assert
  // the mask is not needed. aarch64 just uses bitfield extract
  __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
           ConstantPoolCache::tos_state_bits);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpw(flags, itos);
      __ br(Assembler::NE, notInt);
      // itos
      __ ldrw(r0, field);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmpw(flags, ftos);
      __ br(Assembler::NE, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmpw(flags, stos);
      __ br(Assembler::NE, notShort);
      // stos
      __ load_signed_short(r0, field);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmpw(flags, btos);
      __ br(Assembler::NE, notByte);
      // btos
      __ load_signed_byte(r0, field);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmpw(flags, ctos);
      __ br(Assembler::NE, notChar);
      // ctos
      __ load_unsigned_short(r0, field);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmpw(flags, ztos);
      __ br(Assembler::NE, notBool);
      // ztos
      __ load_signed_byte(r0, field);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpw(flags, ltos);
      __ br(Assembler::NE, notLong);
      // ltos
      __ ldr(r0, field);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmpw(flags, dtos);
      __ br(Assembler::NE, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}
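
// A sketch of the flags word decoded above (an assumption based on the
// ConstantPoolCache constants used here, not a normative layout): the
// VM hands back
//
//   flags = [ ... | tos_state | field_index ]
//
// where field_index_mask isolates the byte offset of the primitive
// value inside the object returned in r0, and the ubfxw pulls out the
// TosState that selects which push is performed.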

void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}
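
// The negation above mirrors the frame layout (a sketch, assuming the
// usual downward-growing locals area): local #n lives at
// rlocals - n * wordSize, so iaddress(Register), which scales by 8 via
// lsl(3), needs the index negated to step below rlocals.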

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);
  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}
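
// Why one unsigned compare suffices above (a summary of the existing
// trick, not new logic): cmpw/LO treats the index as unsigned, so a
// negative index appears as a huge unsigned value and fails
// index < length together with the too-large case, covering both bounds
// with a single branch.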

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
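
// The element addressing above, spelled out (illustrative arithmetic):
// pre-biasing the index by base_offset_in_bytes(T_INT) >> 2 makes the
// scaled address
//
//   r0 + ((index + header_words) << 2)
//     == r0 + arrayOopDesc::base_offset_in_bytes(T_INT) + index * 4
//
// i.e. the address of element [index]; the loads and stores below use
// the same pattern with the shift matched to the element size.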

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  do_oop_load(_masm,
              Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
              r0,
              IS_ARRAY);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str. We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));

  index_check(r3, r2);     // kills r1
  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

  // do array store check - check for null value first
  __ cbz(r0, is_null);

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register. Frees r2.

  // Generate subtype check. Blows r2, r5
  // Superklass in r0. Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a null in r0, r3=array, r2=index. Store null at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());    // load b
  __ ldr(r2, at_tos_p1()); // load a
  __ str(r0, at_tos_p1()); // store b
  __ str(r2, at_tos());    // store a
  __ push(r0);             // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());    // load c
  __ ldr(r2, at_tos_p2()); // load a
  __ str(r0, at_tos_p2()); // store c in a
  __ push(r0);             // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2()); // load b
  __ str(r2, at_tos_p2()); // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1()); // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1()); // load a
  __ push(r0);             // push a
  __ ldr(r0, at_tos_p1()); // load b
  __ push(r0);             // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());    // load c
  __ ldr(r0, at_tos_p1()); // load b
  __ push(r0);             // push b
  __ push(r2);             // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3()); // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4()); // load a
  __ str(r2, at_tos_p2()); // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4()); // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());    // load d
  __ ldr(r0, at_tos_p1()); // load c
  __ push(r0);             // push c
  __ push(r2);             // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4()); // load b
  __ str(r0, at_tos_p2()); // store b in d
  __ str(r2, at_tos_p4()); // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5()); // load a
  __ ldr(r0, at_tos_p3()); // load c
  __ str(r2, at_tos_p3()); // store a in c
  __ str(r0, at_tos_p5()); // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1()); // load a
  __ ldr(r0, at_tos());    // load b
  __ str(r2, at_tos());    // store a in b
  __ str(r0, at_tos_p1()); // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}
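
// A note on the division templates above and below (a summary, not new
// logic): aarch64 sdiv never traps, returns 0 for division by zero, and
// wraps min_int / -1 to min_int as Java requires, so only the explicit
// divide-by-zero branch to the ArithmeticException entry is needed;
// corrected_idivl/corrected_idivq then produce the quotient or, via
// msub, the remainder.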

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
  // __ mov(r1, zr);
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}
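
// Decoding sketch for the wide iinc operands above (an assumption from
// the bytecode format, not generated code): bcp+2 holds a big-endian u2
// local index followed by a big-endian s2 increment, so the single
// little-endian ldrw sees, from MSB to LSB,
//
//   r1 = const_lo : const_hi : index_lo : index_hi
//
// rev16 byte-swaps each halfword into native order, ubfx extracts the
// unsigned index (negated for the downward-growing locals) and sbfx
// extracts the sign-extended increment.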

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzsw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzs(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzdw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzd(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}
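
// The csinc trick above, spelled out (a summary of existing behavior):
// with r0 preset to -1 and flags set by cmp r1, r0,
//
//   r1 <  r0  ->  branch taken,         result -1
//   r1 == r0  ->  csinc selects zr,     result  0
//   r1 >  r0  ->  csinc selects zr + 1, result  1
//
// exactly the -1/0/1 contract of lcmp.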

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}
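
// unordered_result encodes the fcmpl/fcmpg split (a summary, not new
// logic): fcmpl (unordered_result < 0) must yield -1 when either operand
// is NaN, so the LT branch catches "less than or unordered"; fcmpg must
// yield 1, so the HI branch catches "greater than or unordered". The
// ordered cases fall through to the conditional selects above.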

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.

  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the displacement in r2 (16-bit for jsr,
    // 32-bit for jsr_w)
    __ add(rbcp, rbcp, r2);
    __ dispatch_only(vtos, /*generate_poll*/true);
    return;
  }
1797
1798 // Normal (non-jsr) branch handling
1799
1800 // Adjust the bcp by the displacement in r2
1801 __ add(rbcp, rbcp, r2);
1802
1803 assert(UseLoopCounter || !UseOnStackReplacement,
1804 "on-stack-replacement requires loop counters");
1805 Label backedge_counter_overflow;
1806 Label dispatch;
1807 if (UseLoopCounter) {
1808 // increment backedge counter for backward branches
1809 // r0: MDO
1810 // w1: MDO bumped taken-count
1811 // r2: target offset
1812 __ cmp(r2, zr);
1813 __ br(Assembler::GT, dispatch); // count only if backward branch
1814
1815 // ECN: FIXME: This code smells
1816 // check if MethodCounters exists
1817 Label has_counters;
1818 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1819 __ cbnz(rscratch1, has_counters);
1820 __ push(r0);
1821 __ push(r1);
1822 __ push(r2);
1823 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1824 InterpreterRuntime::build_method_counters), rmethod);
1825 __ pop(r2);
1826 __ pop(r1);
1827 __ pop(r0);
1828 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1829 __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1830 __ bind(has_counters);
1831
1832 Label no_mdo;
1833 int increment = InvocationCounter::count_increment;
1834 if (ProfileInterpreter) {
1835 // Are we profiling?
1836 __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1837 __ cbz(r1, no_mdo);
1838 // Increment the MDO backedge counter
1839 const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1840 in_bytes(InvocationCounter::counter_offset()));
1841 const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1842 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1843 r0, rscratch1, false, Assembler::EQ,
1844 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1845 __ b(dispatch);
1846 }
1847 __ bind(no_mdo);
1848 // Increment backedge counter in MethodCounters*
1849 __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1850 const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1851 __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1852 r0, rscratch2, false, Assembler::EQ,
1853 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1854 __ bind(dispatch);
1855 }
1856
1857 // Pre-load the next target bytecode into rscratch1
1858 __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1859
1860 // continue with the bytecode @ target
1861 // rscratch1: target bytecode
1862 // rbcp: target bcp
1863 __ dispatch_only(vtos, /*generate_poll*/true);
1864
1865 if (UseLoopCounter && UseOnStackReplacement) {
1866 // invocation counter overflow
1867 __ bind(backedge_counter_overflow);
1868 __ neg(r2, r2);
1869 __ add(r2, r2, rbcp); // branch bcp
1870 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1871 __ call_VM(noreg,
1872 CAST_FROM_FN_PTR(address,
1873 InterpreterRuntime::frequency_counter_overflow),
1874 r2);
1875 __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
1876
1877 // r0: osr nmethod (osr ok) or null (osr not possible)
1878 // w1: target bytecode
1879 // r2: scratch
1880 __ cbz(r0, dispatch); // test result -- no osr if null
1881 // nmethod may have been invalidated (VM may block upon call_VM return)
1882 __ ldrb(r2, Address(r0, nmethod::state_offset()));
1883 if (nmethod::in_use != 0)
1884 __ sub(r2, r2, nmethod::in_use);
1885 __ cbnz(r2, dispatch);
1886
1887 // We have the address of an on stack replacement routine in r0
1888 // We need to prepare to execute the OSR method. First we must
1889 // migrate the locals and monitors off of the stack.
1890
1891 __ mov(r19, r0); // save the nmethod
1892
1893 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1894
1895 // r0 is OSR buffer, move it to expected parameter location
1896 __ mov(j_rarg0, r0);
1897
1898 // remove activation
1899 // get sender esp
1900 __ ldr(esp,
1901 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1902 // remove frame anchor
1903 __ leave();
1904 // Ensure compiled code always sees stack at proper alignment
1905 __ andr(sp, esp, -16);
1906
1907 // and begin the OSR nmethod
1908 __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1909 __ br(rscratch1);
1910 }
1911 }
1912
1913
1914 void TemplateTable::if_0cmp(Condition cc)
1915 {
1916 transition(itos, vtos);
1917 // assume branch is more often taken than not (loops use backward branches)
1918 Label not_taken;
1919 if (cc == equal)
1920 __ cbnzw(r0, not_taken);
1921 else if (cc == not_equal)
1922 __ cbzw(r0, not_taken);
1923 else {
1924 __ andsw(zr, r0, r0);
1925 __ br(j_not(cc), not_taken);
1926 }
1927
1928 branch(false, false);
1929 __ bind(not_taken);
1930 __ profile_not_taken_branch(r0);
1931 }
1932
1933 void TemplateTable::if_icmp(Condition cc)
1934 {
1935 transition(itos, vtos);
1936 // assume branch is more often taken than not (loops use backward branches)
1937 Label not_taken;
1938 __ pop_i(r1);
1939 __ cmpw(r1, r0, Assembler::LSL);
1940 __ br(j_not(cc), not_taken);
1941 branch(false, false);
1942 __ bind(not_taken);
1943 __ profile_not_taken_branch(r0);
1944 }
1945
1946 void TemplateTable::if_nullcmp(Condition cc)
1947 {
1948 transition(atos, vtos);
1949 // assume branch is more often taken than not (loops use backward branches)
1950 Label not_taken;
1951 if (cc == equal)
1952 __ cbnz(r0, not_taken);
1953 else
1954 __ cbz(r0, not_taken);
1955 branch(false, false);
1956 __ bind(not_taken);
1957 __ profile_not_taken_branch(r0);
1958 }
1959
1960 void TemplateTable::if_acmp(Condition cc)
1961 {
1962 transition(atos, vtos);
1963 // assume branch is more often taken than not (loops use backward branches)
1964 Label not_taken;
1965 __ pop_ptr(r1);
1966 __ cmpoop(r1, r0);
1967 __ br(j_not(cc), not_taken);
1968 branch(false, false);
1969 __ bind(not_taken);
1970 __ profile_not_taken_branch(r0);
1971 }
1972
1973 void TemplateTable::ret() {
1974 transition(vtos, vtos);
1975 locals_index(r1);
1976 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1977 __ profile_ret(r1, r2);
1978 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1979 __ lea(rbcp, Address(rbcp, r1));
1980 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1981 __ dispatch_next(vtos, 0, /*generate_poll*/true);
1982 }
1983
1984 void TemplateTable::wide_ret() {
1985 transition(vtos, vtos);
1986 locals_index_wide(r1);
1987 __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1988 __ profile_ret(r1, r2);
1989 __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1990 __ lea(rbcp, Address(rbcp, r1));
1991 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1992 __ dispatch_next(vtos, 0, /*generate_poll*/true);
1993 }
1994
1995
1996 void TemplateTable::tableswitch() {
1997 Label default_case, continue_execution;
1998 transition(itos, vtos);
1999 // align rbcp
2000 __ lea(r1, at_bcp(BytesPerInt));
2001 __ andr(r1, r1, -BytesPerInt);
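  // r1 now points at the 4-byte-aligned operand block (all entries big-endian):
  //   [r1, 0]                  default offset
  //   [r1, 4]                  lo
  //   [r1, 8]                  hi
  //   [r1, 12 + 4*(key - lo)]  jump offsets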
2002 // load lo & hi
2003 __ ldrw(r2, Address(r1, BytesPerInt));
2004 __ ldrw(r3, Address(r1, 2 * BytesPerInt));
2005 __ rev32(r2, r2);
2006 __ rev32(r3, r3);
2007 // check against lo & hi
2008 __ cmpw(r0, r2);
2009 __ br(Assembler::LT, default_case);
2010 __ cmpw(r0, r3);
2011 __ br(Assembler::GT, default_case);
2012 // lookup dispatch offset
2013 __ subw(r0, r0, r2);
2014 __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2015 __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2016 __ profile_switch_case(r0, r1, r2);
2017 // continue execution
2018 __ bind(continue_execution);
2019 __ rev32(r3, r3);
2020 __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2021 __ add(rbcp, rbcp, r3, ext::sxtw);
2022 __ dispatch_only(vtos, /*generate_poll*/true);
2023 // handle default
2024 __ bind(default_case);
2025 __ profile_switch_default(r0);
2026 __ ldrw(r3, Address(r1, 0));
2027 __ b(continue_execution);
2028 }
2029
2030 void TemplateTable::lookupswitch() {
2031 transition(itos, itos);
2032 __ stop("lookupswitch bytecode should have been rewritten");
2033 }
2034
2035 void TemplateTable::fast_linearswitch() {
2036 transition(itos, vtos);
2037 Label loop_entry, loop, found, continue_execution;
2038 // bswap r0 so we can avoid bswapping the table entries
2039 __ rev32(r0, r0);
2040 // align rbcp
2041 __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2042 // this instruction (change offsets
2043 // below)
2044 __ andr(r19, r19, -BytesPerInt);
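  // r19 now points at the aligned operand block (all entries big-endian):
  //   [r19, 0]         default offset
  //   [r19, 4]         npairs
  //   [r19, 8 + 8*k]   match[k]
  //   [r19, 12 + 8*k]  offset[k]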
2045 // set counter
2046 __ ldrw(r1, Address(r19, BytesPerInt));
2047 __ rev32(r1, r1);
2048 __ b(loop_entry);
2049 // table search
2050 __ bind(loop);
2051 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2052 __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2053 __ cmpw(r0, rscratch1);
2054 __ br(Assembler::EQ, found);
2055 __ bind(loop_entry);
2056 __ subs(r1, r1, 1);
2057 __ br(Assembler::PL, loop);
2058 // default case
2059 __ profile_switch_default(r0);
2060 __ ldrw(r3, Address(r19, 0));
2061 __ b(continue_execution);
2062 // entry found -> get offset
2063 __ bind(found);
2064 __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2065 __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2066 __ profile_switch_case(r1, r0, r19);
2067 // continue execution
2068 __ bind(continue_execution);
2069 __ rev32(r3, r3);
2070 __ add(rbcp, rbcp, r3, ext::sxtw);
2071 __ ldrb(rscratch1, Address(rbcp, 0));
2072 __ dispatch_only(vtos, /*generate_poll*/true);
2073 }
2074
2075 void TemplateTable::fast_binaryswitch() {
2076 transition(itos, vtos);
2077 // Implementation using the following core algorithm:
2078 //
2079 // int binary_search(int key, LookupswitchPair* array, int n) {
2080 // // Binary search according to "Methodik des Programmierens" by
2081 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2082 // int i = 0;
2083 // int j = n;
2084 // while (i+1 < j) {
2085 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2086 // // with Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
2088 // // element a[n] is infinitely big.
2089 // int h = (i + j) >> 1;
2090 // // i < h < j
2091 // if (key < array[h].fast_match()) {
2092 // j = h;
2093 // } else {
2094 // i = h;
2095 // }
2096 // }
2097 // // R: a[i] <= key < a[i+1] or Q
2098 // // (i.e., if key is within array, i is the correct index)
2099 // return i;
2100 // }
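  //
  // Illustrative trace (not generated code): matches {3, 7, 9, 12}, key = 9:
  //   i=0, j=4: h=2, a[2].fast_match() = 9  <= 9, so i = 2
  //   i=2, j=4: h=3, a[3].fast_match() = 12 >  9, so j = 3
  //   i+1 == j: the loop exits with i = 2, and a[2] matches the key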
2101
2102 // Register allocation
2103 const Register key = r0; // already set (tosca)
2104 const Register array = r1;
2105 const Register i = r2;
2106 const Register j = r3;
2107 const Register h = rscratch1;
2108 const Register temp = rscratch2;
2109
2110 // Find array start
2111 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2112 // get rid of this
2113 // instruction (change
2114 // offsets below)
2115 __ andr(array, array, -BytesPerInt);
2116
2117 // Initialize i & j
2118 __ mov(i, 0); // i = 0;
2119 __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2120
2121 // Convert j into native byteordering
2122 __ rev32(j, j);
2123
2124 // And start
2125 Label entry;
2126 __ b(entry);
2127
2128 // binary search loop
2129 {
2130 Label loop;
2131 __ bind(loop);
2132 // int h = (i + j) >> 1;
2133 __ addw(h, i, j); // h = i + j;
2134 __ lsrw(h, h, 1); // h = (i + j) >> 1;
2135 // if (key < array[h].fast_match()) {
2136 // j = h;
2137 // } else {
2138 // i = h;
2139 // }
2140 // Convert array[h].match to native byte-ordering before compare
2141 __ ldr(temp, Address(array, h, Address::lsl(3)));
2142 __ rev32(temp, temp);
2143 __ cmpw(key, temp);
2144 // j = h if (key < array[h].fast_match())
2145 __ csel(j, h, j, Assembler::LT);
2146 // i = h if (key >= array[h].fast_match())
2147 __ csel(i, h, i, Assembler::GE);
2148 // while (i+1 < j)
2149 __ bind(entry);
2150 __ addw(h, i, 1); // i+1
2151 __ cmpw(h, j); // i+1 < j
2152 __ br(Assembler::LT, loop);
2153 }
2154
2155 // end of binary search, result index is i (must check again!)
2156 Label default_case;
2157 // Convert array[i].match to native byte-ordering before compare
2158 __ ldr(temp, Address(array, i, Address::lsl(3)));
2159 __ rev32(temp, temp);
2160 __ cmpw(key, temp);
2161 __ br(Assembler::NE, default_case);
2162
2163 // entry found -> j = offset
2164 __ add(j, array, i, ext::uxtx, 3);
2165 __ ldrw(j, Address(j, BytesPerInt));
2166 __ profile_switch_case(i, key, array);
2167 __ rev32(j, j);
2168 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2169 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2170 __ dispatch_only(vtos, /*generate_poll*/true);
2171
2172 // default case -> j = default offset
2173 __ bind(default_case);
2174 __ profile_switch_default(i);
2175 __ ldrw(j, Address(array, -2 * BytesPerInt));
2176 __ rev32(j, j);
2177 __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2178 __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2179 __ dispatch_only(vtos, /*generate_poll*/true);
2180 }
2181
2182
2183 void TemplateTable::_return(TosState state)
2184 {
2185 transition(state, state);
2186 assert(_desc->calls_vm(),
2187 "inconsistent calls_vm information"); // call in remove_activation
2188
2189 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2190 assert(state == vtos, "only valid state");
2191
2192 __ ldr(c_rarg1, aaddress(0));
2193 __ load_klass(r3, c_rarg1);
2194 __ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
2195 Label skip_register_finalizer;
2196 __ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);
2197
2198 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2199
2200 __ bind(skip_register_finalizer);
2201 }
2202
  // Issue a StoreStore barrier after all stores but before return
  // from any constructor for any class with a final field. We don't
  // know if this return is from such a constructor, so we always
  // issue the barrier.
2206 if (_desc->bytecode() == Bytecodes::_return)
2207 __ membar(MacroAssembler::StoreStore);
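  // An illustrative Java-level pattern this protects (sketch only):
  //   class C { final int x; C(int v) { x = v; } }
  //   shared = new C(42);  // the store to x must not reorder with
  //                        // the publishing store to 'shared'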
2208
2209 if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2210 Label no_safepoint;
2211 __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2212 __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
2213 __ push(state);
2214 __ push_cont_fastpath(rthread);
2215 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2216 __ pop_cont_fastpath(rthread);
2217 __ pop(state);
2218 __ bind(no_safepoint);
2219 }
2220
2221 // Narrow result if state is itos but result type is smaller.
2222 // Need to narrow in the return bytecode rather than in generate_return_entry
2223 // since compiled code callers expect the result to already be narrowed.
2224 if (state == itos) {
2225 __ narrow(r0);
2226 }
2227
2228 __ remove_activation(state);
2229 __ ret(lr);
2230 }
2231
2232 // ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
2234 // in order. Store buffers on most chips allow reads & writes to
2235 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2236 // without some kind of memory barrier (i.e., it's not sufficient that
2237 // the interpreter does not reorder volatile references, the hardware
2238 // also must not reorder them).
2239 //
2240 // According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other. ALSO reads &
2242 // writes act as acquire & release, so:
2243 // (2) A read cannot let unrelated NON-volatile memory refs that
2244 // happen after the read float up to before the read. It's OK for
2245 // non-volatile memory refs that happen before the volatile read to
2246 // float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2248 // memory refs that happen BEFORE the write float down to after the
2249 // write. It's OK for non-volatile memory refs that happen after the
2250 // volatile write to float up before it.
2251 //
2252 // We only put in barriers around volatile refs (they are expensive),
2253 // not _between_ memory refs (that would require us to track the
2254 // flavor of the previous memory refs). Requirements (2) and (3)
2255 // require some barriers before volatile stores and after volatile
2256 // loads. These nearly cover requirement (1) but miss the
2257 // volatile-store-volatile-load case. This final case is placed after
2258 // volatile-stores although it could just as well go before
2259 // volatile-loads.
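//
// As a sketch (barrier placement only, not literal generated code), the
// pattern emitted below for volatile field accesses is:
//
//   volatile store:                     volatile load:
//     membar(StoreStore|LoadStore)        ldr(value, field)
//     str(value, field)                   membar(LoadLoad|LoadStore)
//     membar(StoreLoad|StoreStore)
//
// The trailing StoreLoad|StoreStore handles the volatile-store-volatile-load
// case. When compiled code may be running, a leading AnyAny barrier is also
// emitted before volatile loads so that plain LDR stays sequentially
// consistent with the STLR/LDAR used by C2 (see the 8179954 comments below).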
2260
2261 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2262 Register Rcache,
2263 Register index) {
2264 const Register temp = r19;
2265 assert_different_registers(Rcache, index, temp);
2266 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2267
2268 Label resolved, clinit_barrier_slow;
2269
2270 Bytecodes::Code code = bytecode();
2271 __ load_method_entry(Rcache, index);
2272 switch(byte_no) {
2273 case f1_byte:
2274 __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2275 break;
2276 case f2_byte:
2277 __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2278 break;
2279 }
2280 // Load-acquire the bytecode to match store-release in InterpreterRuntime
2281 __ ldarb(temp, temp);
2282 __ subs(zr, temp, (int) code); // have we resolved this bytecode?
2283 __ br(Assembler::EQ, resolved);
2284
2285 // resolve first time through
2286 // Class initialization barrier slow path lands here as well.
2287 __ bind(clinit_barrier_slow);
2288 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2289 __ mov(temp, (int) code);
2290 __ call_VM(noreg, entry, temp);
2291
2292 // Update registers with resolved info
2293 __ load_method_entry(Rcache, index);
  // n.b. unlike x86, Rcache is now rcpool plus the indexed offset,
  // so all clients of this method must be modified accordingly
2296 __ bind(resolved);
2297
2298 // Class initialization barrier for static methods
2299 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2300 __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2301 __ load_method_holder(temp, temp);
2302 __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
2303 }
2304 }
2305
2306 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2307 Register Rcache,
2308 Register index) {
2309 const Register temp = r19;
2310 assert_different_registers(Rcache, index, temp);
2311
2312 Label resolved;
2313
2314 Bytecodes::Code code = bytecode();
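  // nofast bytecodes resolve exactly like their rewritten counterparts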
2315 switch (code) {
2316 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2317 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2318 default: break;
2319 }
2320
2321 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2322 __ load_field_entry(Rcache, index);
2323 if (byte_no == f1_byte) {
2324 __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2325 } else {
2326 __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2327 }
2328 // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
2329 __ ldarb(temp, temp);
2330 __ subs(zr, temp, (int) code); // have we resolved this bytecode?
2331 __ br(Assembler::EQ, resolved);
2332
2333 // resolve first time through
2334 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2335 __ mov(temp, (int) code);
2336 __ call_VM(noreg, entry, temp);
2337
2338 // Update registers with resolved info
2339 __ load_field_entry(Rcache, index);
2340 __ bind(resolved);
2341 }
2342
2343 void TemplateTable::load_resolved_field_entry(Register obj,
2344 Register cache,
2345 Register tos_state,
2346 Register offset,
2347 Register flags,
2348 bool is_static = false) {
2349 assert_different_registers(cache, tos_state, flags, offset);
2350
2351 // Field offset
2352 __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2353
2354 // Flags
2355 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2356
2357 // TOS state
2358 if (tos_state != noreg) {
2359 __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2360 }
2361
  // For statics, overwrite obj with the field holder's java mirror
2363 if (is_static) {
2364 __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2365 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2366 __ ldr(obj, Address(obj, mirror_offset));
2367 __ resolve_oop_handle(obj, r5, rscratch2);
2368 }
2369 }
2370
2371 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2372 Register method,
2373 Register flags) {
2374
2375 // setup registers
2376 const Register index = flags;
2377 assert_different_registers(method, cache, flags);
2378
2379 // determine constant pool cache field offsets
2380 resolve_cache_and_index_for_method(f1_byte, cache, index);
2381 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2382 __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2383 }
2384
2385 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2386 Register method,
2387 Register ref_index,
2388 Register flags) {
2389 // setup registers
2390 const Register index = ref_index;
2391 assert_different_registers(method, flags);
2392 assert_different_registers(method, cache, index);
2393
2394 // determine constant pool cache field offsets
2395 resolve_cache_and_index_for_method(f1_byte, cache, index);
2396 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2397
2398 // maybe push appendix to arguments (just before return address)
2399 Label L_no_push;
2400 __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
2401 // invokehandle uses an index into the resolved references array
2402 __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2403 // Push the appendix as a trailing parameter.
2404 // This must be done before we get the receiver,
2405 // since the parameter_size includes it.
2406 Register appendix = method;
2407 __ load_resolved_reference_at_index(appendix, ref_index);
2408 __ push(appendix); // push appendix (MethodType, CallSite, etc.)
2409 __ bind(L_no_push);
2410
2411 __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2412 }
2413
2414 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2415 Register klass,
2416 Register method_or_table_index,
2417 Register flags) {
2418 // setup registers
2419 const Register index = method_or_table_index;
2420 assert_different_registers(method_or_table_index, cache, flags);
2421
2422 // determine constant pool cache field offsets
2423 resolve_cache_and_index_for_method(f1_byte, cache, index);
2424 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2425
  // Invokeinterface can behave in different ways:
  // If calling a method from java.lang.Object, the forced-virtual flag is set, so the
  // invocation behaves like an invokevirtual call. The vfinal flag then determines
  // whether a Method* or a vtable index is placed in the register.
  // Otherwise, the registers are populated with the klass and method.
2431
2432 Label NotVirtual; Label NotVFinal; Label Done;
2433 __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
2434 __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2435 __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2436 __ b(Done);
2437
2438 __ bind(NotVFinal);
2439 __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2440 __ b(Done);
2441
2442 __ bind(NotVirtual);
2443 __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2444 __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
2445 __ bind(Done);
2446 }
2447
2448 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
2449 Register method_or_table_index,
2450 Register flags) {
2451 // setup registers
2452 const Register index = flags;
2453 assert_different_registers(method_or_table_index, cache, flags);
2454
2455 // determine constant pool cache field offsets
2456 resolve_cache_and_index_for_method(f2_byte, cache, index);
2457 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2458
  // method_or_table_index can either be a vtable index or a Method* depending on the virtual final flag
2460 Label NotVFinal; Label Done;
2461 __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2462 __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2463 __ b(Done);
2464
2465 __ bind(NotVFinal);
2466 __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2467 __ bind(Done);
2468 }
2469
2470 // The rmethod register is input and overwritten to be the adapter method for the
2471 // indy call. Link Register (lr) is set to the return address for the adapter and
// an appendix may be pushed to the stack. Registers r0-r3 are clobbered.
2473 void TemplateTable::load_invokedynamic_entry(Register method) {
2474 // setup registers
2475 const Register appendix = r0;
2476 const Register cache = r2;
2477 const Register index = r3;
2478 assert_different_registers(method, appendix, cache, index, rcpool);
2479
2480 __ save_bcp();
2481
2482 Label resolved;
2483
2484 __ load_resolved_indy_entry(cache, index);
2485 // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2486 __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2487 __ ldar(method, method);
2488
2489 // Compare the method to zero
2490 __ cbnz(method, resolved);
2491
2492 Bytecodes::Code code = bytecode();
2493
2494 // Call to the interpreter runtime to resolve invokedynamic
2495 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2496 __ mov(method, code); // this is essentially Bytecodes::_invokedynamic
2497 __ call_VM(noreg, entry, method);
2498 // Update registers with resolved info
2499 __ load_resolved_indy_entry(cache, index);
2500 // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2501 __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2502 __ ldar(method, method);
2503
2504 #ifdef ASSERT
2505 __ cbnz(method, resolved);
2506 __ stop("Should be resolved by now");
2507 #endif // ASSERT
2508 __ bind(resolved);
2509
2510 Label L_no_push;
2511 // Check if there is an appendix
2512 __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2513 __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);
2514
2515 // Get appendix
2516 __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2517 // Push the appendix as a trailing parameter
2518 // since the parameter_size includes it.
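  // Save method on the stack: it is temporarily reused as the index
  // register for load_resolved_reference_at_index below.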
2519 __ push(method);
2520 __ mov(method, index);
2521 __ load_resolved_reference_at_index(appendix, method);
2522 __ verify_oop(appendix);
2523 __ pop(method);
2524 __ push(appendix); // push appendix (MethodType, CallSite, etc.)
2525 __ bind(L_no_push);
2526
2527 // compute return type
2528 __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2529 // load return address
  // The return address is loaded into the link register (lr), not pushed
  // to the stack as on x86
2532 {
2533 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2534 __ mov(rscratch1, table_addr);
2535 __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
2536 }
2537 }
2538
// The registers cache and index are expected to be set before the call.
2540 // Correct values of the cache and index registers are preserved.
2541 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2542 bool is_static, bool has_tos) {
2543 // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because they are the argument
  // registers for the call into the VM below
2546 if (JvmtiExport::can_post_field_access()) {
2547 // Check to see if a field access watch has been set before we
2548 // take the time to call into the VM.
2549 Label L1;
2550 assert_different_registers(cache, index, r0);
2551 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2552 __ ldrw(r0, Address(rscratch1));
2553 __ cbzw(r0, L1);
2554
2555 __ load_field_entry(c_rarg2, index);
2556
2557 if (is_static) {
2558 __ mov(c_rarg1, zr); // null object reference
2559 } else {
2560 __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2561 __ verify_oop(c_rarg1);
2562 }
2563 // c_rarg1: object pointer or null
2564 // c_rarg2: cache entry pointer
2565 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2566 InterpreterRuntime::post_field_access),
2567 c_rarg1, c_rarg2);
2568 __ load_field_entry(cache, index);
2569 __ bind(L1);
2570 }
2571 }
2572
2573 void TemplateTable::pop_and_check_object(Register r)
2574 {
2575 __ pop_ptr(r);
2576 __ null_check(r); // for field access must check obj.
2577 __ verify_oop(r);
2578 }
2579
2580 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2581 {
2582 const Register cache = r4;
2583 const Register obj = r4;
2584 const Register index = r3;
2585 const Register tos_state = r3;
2586 const Register off = r19;
2587 const Register flags = r6;
2588 const Register bc = r4; // uses same reg as obj, so don't mix them
2589
2590 resolve_cache_and_index_for_field(byte_no, cache, index);
2591 jvmti_post_field_access(cache, index, is_static, false);
2592 load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2593
2594 if (!is_static) {
2595 // obj is on the stack
2596 pop_and_check_object(obj);
2597 }
2598
2599 // 8179954: We need to make sure that the code generated for
2600 // volatile accesses forms a sequentially-consistent set of
2601 // operations when combined with STLR and LDAR. Without a leading
2602 // membar it's possible for a simple Dekker test to fail if loads
2603 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
2604 // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
2606 Label notVolatile;
2607 __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2608 __ membar(MacroAssembler::AnyAny);
2609 __ bind(notVolatile);
2610 }
2611
2612 const Address field(obj, off);
2613
2614 Label Done, notByte, notBool, notInt, notShort, notChar,
2615 notLong, notFloat, notObj, notDouble;
2616
2617 assert(btos == 0, "change code, btos != 0");
2618 __ cbnz(tos_state, notByte);
2619
2620 // Don't rewrite getstatic, only getfield
2621 if (is_static) rc = may_not_rewrite;
2622
2623 // btos
2624 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2625 __ push(btos);
2626 // Rewrite bytecode to be faster
2627 if (rc == may_rewrite) {
2628 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2629 }
2630 __ b(Done);
2631
2632 __ bind(notByte);
2633 __ cmp(tos_state, (u1)ztos);
2634 __ br(Assembler::NE, notBool);
2635
2636 // ztos (same code as btos)
2637 __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2638 __ push(ztos);
2639 // Rewrite bytecode to be faster
2640 if (rc == may_rewrite) {
    // use btos rewriting; no truncation to the t/f bit is needed for getfield.
2642 patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2643 }
2644 __ b(Done);
2645
2646 __ bind(notBool);
2647 __ cmp(tos_state, (u1)atos);
2648 __ br(Assembler::NE, notObj);
2649 // atos
2650 do_oop_load(_masm, field, r0, IN_HEAP);
2651 __ push(atos);
2652 if (rc == may_rewrite) {
2653 patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2654 }
2655 __ b(Done);
2656
2657 __ bind(notObj);
2658 __ cmp(tos_state, (u1)itos);
2659 __ br(Assembler::NE, notInt);
2660 // itos
2661 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2662 __ push(itos);
2663 // Rewrite bytecode to be faster
2664 if (rc == may_rewrite) {
2665 patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2666 }
2667 __ b(Done);
2668
2669 __ bind(notInt);
2670 __ cmp(tos_state, (u1)ctos);
2671 __ br(Assembler::NE, notChar);
2672 // ctos
2673 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2674 __ push(ctos);
2675 // Rewrite bytecode to be faster
2676 if (rc == may_rewrite) {
2677 patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2678 }
2679 __ b(Done);
2680
2681 __ bind(notChar);
2682 __ cmp(tos_state, (u1)stos);
2683 __ br(Assembler::NE, notShort);
2684 // stos
2685 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2686 __ push(stos);
2687 // Rewrite bytecode to be faster
2688 if (rc == may_rewrite) {
2689 patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2690 }
2691 __ b(Done);
2692
2693 __ bind(notShort);
2694 __ cmp(tos_state, (u1)ltos);
2695 __ br(Assembler::NE, notLong);
2696 // ltos
2697 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2698 __ push(ltos);
2699 // Rewrite bytecode to be faster
2700 if (rc == may_rewrite) {
2701 patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2702 }
2703 __ b(Done);
2704
2705 __ bind(notLong);
2706 __ cmp(tos_state, (u1)ftos);
2707 __ br(Assembler::NE, notFloat);
2708 // ftos
2709 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2710 __ push(ftos);
2711 // Rewrite bytecode to be faster
2712 if (rc == may_rewrite) {
2713 patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2714 }
2715 __ b(Done);
2716
2717 __ bind(notFloat);
2718 #ifdef ASSERT
2719 __ cmp(tos_state, (u1)dtos);
2720 __ br(Assembler::NE, notDouble);
2721 #endif
2722 // dtos
2723 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2724 __ push(dtos);
2725 // Rewrite bytecode to be faster
2726 if (rc == may_rewrite) {
2727 patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2728 }
2729 #ifdef ASSERT
2730 __ b(Done);
2731
2732 __ bind(notDouble);
2733 __ stop("Bad state");
2734 #endif
2735
2736 __ bind(Done);
2737
2738 Label notVolatile;
2739 __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2740 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2741 __ bind(notVolatile);
2742 }
2743
2744
2745 void TemplateTable::getfield(int byte_no)
2746 {
2747 getfield_or_static(byte_no, false);
2748 }
2749
2750 void TemplateTable::nofast_getfield(int byte_no) {
2751 getfield_or_static(byte_no, false, may_not_rewrite);
2752 }
2753
2754 void TemplateTable::getstatic(int byte_no)
2755 {
2756 getfield_or_static(byte_no, true);
2757 }
2758
// The registers cache and index are expected to be set before the call.
2760 // The function may destroy various registers, just not the cache and index registers.
2761 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2762 transition(vtos, vtos);
2763
2764 if (JvmtiExport::can_post_field_modification()) {
2765 // Check to see if a field modification watch has been set before
2766 // we take the time to call into the VM.
2767 Label L1;
2768 assert_different_registers(cache, index, r0);
2769 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2770 __ ldrw(r0, Address(rscratch1));
2771 __ cbz(r0, L1);
2772
2773 __ mov(c_rarg2, cache);
2774
2775 if (is_static) {
2776 // Life is simple. Null out the object pointer.
2777 __ mov(c_rarg1, zr);
2778 } else {
2779 // Life is harder. The stack holds the value on top, followed by
2780 // the object. We don't know the size of the value, though; it
2781 // could be one or two words depending on its type. As a result,
2782 // we must find the type to determine where the object is.
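      // Illustrative expression stack layouts at this point:
      //   one-word value:            two-word value (long/double):
      //     esp[0]: value              esp[0..1]: value
      //     esp[1]: object             esp[2]:    object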
2783 __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
2784 Label nope2, done, ok;
2785 __ ldr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2786 __ cmpw(c_rarg3, ltos);
2787 __ br(Assembler::EQ, ok);
2788 __ cmpw(c_rarg3, dtos);
2789 __ br(Assembler::NE, nope2);
2790 __ bind(ok);
2791 __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2792 __ bind(nope2);
2793 }
2794 // object (tos)
2795 __ mov(c_rarg3, esp);
2796 // c_rarg1: object pointer set up above (null if static)
2797 // c_rarg2: cache entry pointer
2798 // c_rarg3: jvalue object on the stack
2799 __ call_VM(noreg,
2800 CAST_FROM_FN_PTR(address,
2801 InterpreterRuntime::post_field_modification),
2802 c_rarg1, c_rarg2, c_rarg3);
2803 __ load_field_entry(cache, index);
2804 __ bind(L1);
2805 }
2806 }
2807
2808 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2809 transition(vtos, vtos);
2810
2811 const Register cache = r2;
2812 const Register index = r3;
2813 const Register tos_state = r3;
2814 const Register obj = r2;
2815 const Register off = r19;
2816 const Register flags = r0;
2817 const Register bc = r4;
2818
2819 resolve_cache_and_index_for_field(byte_no, cache, index);
2820 jvmti_post_field_mod(cache, index, is_static);
2821 load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2822
2823 Label Done;
2824 __ mov(r5, flags);
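  // flags is r0, which the value pops below clobber, so keep a copy in r5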
2825
2826 {
2827 Label notVolatile;
2828 __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2829 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2830 __ bind(notVolatile);
2831 }
2832
2833 // field address
2834 const Address field(obj, off);
2835
2836 Label notByte, notBool, notInt, notShort, notChar,
2837 notLong, notFloat, notObj, notDouble;
2838
2839 assert(btos == 0, "change code, btos != 0");
2840 __ cbnz(tos_state, notByte);
2841
2842 // Don't rewrite putstatic, only putfield
2843 if (is_static) rc = may_not_rewrite;
2844
2845 // btos
2846 {
2847 __ pop(btos);
2848 if (!is_static) pop_and_check_object(obj);
2849 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
2850 if (rc == may_rewrite) {
2851 patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2852 }
2853 __ b(Done);
2854 }
2855
2856 __ bind(notByte);
2857 __ cmp(tos_state, (u1)ztos);
2858 __ br(Assembler::NE, notBool);
2859
2860 // ztos
2861 {
2862 __ pop(ztos);
2863 if (!is_static) pop_and_check_object(obj);
2864 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
2865 if (rc == may_rewrite) {
2866 patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2867 }
2868 __ b(Done);
2869 }
2870
2871 __ bind(notBool);
2872 __ cmp(tos_state, (u1)atos);
2873 __ br(Assembler::NE, notObj);
2874
2875 // atos
2876 {
2877 __ pop(atos);
2878 if (!is_static) pop_and_check_object(obj);
2879 // Store into the field
2880 do_oop_store(_masm, field, r0, IN_HEAP);
2881 if (rc == may_rewrite) {
2882 patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2883 }
2884 __ b(Done);
2885 }
2886
2887 __ bind(notObj);
2888 __ cmp(tos_state, (u1)itos);
2889 __ br(Assembler::NE, notInt);
2890
2891 // itos
2892 {
2893 __ pop(itos);
2894 if (!is_static) pop_and_check_object(obj);
2895 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
2896 if (rc == may_rewrite) {
2897 patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2898 }
2899 __ b(Done);
2900 }
2901
2902 __ bind(notInt);
2903 __ cmp(tos_state, (u1)ctos);
2904 __ br(Assembler::NE, notChar);
2905
2906 // ctos
2907 {
2908 __ pop(ctos);
2909 if (!is_static) pop_and_check_object(obj);
2910 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
2911 if (rc == may_rewrite) {
2912 patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2913 }
2914 __ b(Done);
2915 }
2916
2917 __ bind(notChar);
2918 __ cmp(tos_state, (u1)stos);
2919 __ br(Assembler::NE, notShort);
2920
2921 // stos
2922 {
2923 __ pop(stos);
2924 if (!is_static) pop_and_check_object(obj);
2925 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
2926 if (rc == may_rewrite) {
2927 patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2928 }
2929 __ b(Done);
2930 }
2931
2932 __ bind(notShort);
2933 __ cmp(tos_state, (u1)ltos);
2934 __ br(Assembler::NE, notLong);
2935
2936 // ltos
2937 {
2938 __ pop(ltos);
2939 if (!is_static) pop_and_check_object(obj);
2940 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
2941 if (rc == may_rewrite) {
2942 patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2943 }
2944 __ b(Done);
2945 }
2946
2947 __ bind(notLong);
2948 __ cmp(tos_state, (u1)ftos);
2949 __ br(Assembler::NE, notFloat);
2950
2951 // ftos
2952 {
2953 __ pop(ftos);
2954 if (!is_static) pop_and_check_object(obj);
2955 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
2956 if (rc == may_rewrite) {
2957 patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2958 }
2959 __ b(Done);
2960 }
2961
2962 __ bind(notFloat);
2963 #ifdef ASSERT
2964 __ cmp(tos_state, (u1)dtos);
2965 __ br(Assembler::NE, notDouble);
2966 #endif
2967
2968 // dtos
2969 {
2970 __ pop(dtos);
2971 if (!is_static) pop_and_check_object(obj);
2972 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
2973 if (rc == may_rewrite) {
2974 patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2975 }
2976 }
2977
2978 #ifdef ASSERT
2979 __ b(Done);
2980
2981 __ bind(notDouble);
2982 __ stop("Bad state");
2983 #endif
2984
2985 __ bind(Done);
2986
2987 {
2988 Label notVolatile;
2989 __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2990 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2991 __ bind(notVolatile);
2992 }
2993 }
2994
2995 void TemplateTable::putfield(int byte_no)
2996 {
2997 putfield_or_static(byte_no, false);
2998 }
2999
3000 void TemplateTable::nofast_putfield(int byte_no) {
3001 putfield_or_static(byte_no, false, may_not_rewrite);
3002 }
3003
3004 void TemplateTable::putstatic(int byte_no) {
3005 putfield_or_static(byte_no, true);
3006 }
3007
3008 void TemplateTable::jvmti_post_fast_field_mod() {
3009 if (JvmtiExport::can_post_field_modification()) {
3010 // Check to see if a field modification watch has been set before
3011 // we take the time to call into the VM.
3012 Label L2;
3013 __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3014 __ ldrw(c_rarg3, Address(rscratch1));
3015 __ cbzw(c_rarg3, L2);
3016 __ pop_ptr(r19); // copy the object pointer from tos
3017 __ verify_oop(r19);
3018 __ push_ptr(r19); // put the object pointer back on tos
3019 // Save tos values before call_VM() clobbers them. Since we have
3020 // to do it for every data type, we use the saved values as the
3021 // jvalue object.
3022 switch (bytecode()) { // load values into the jvalue object
3023 case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3024 case Bytecodes::_fast_bputfield: // fall through
3025 case Bytecodes::_fast_zputfield: // fall through
3026 case Bytecodes::_fast_sputfield: // fall through
3027 case Bytecodes::_fast_cputfield: // fall through
3028 case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3029 case Bytecodes::_fast_dputfield: __ push_d(); break;
3030 case Bytecodes::_fast_fputfield: __ push_f(); break;
3031 case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3032
3033 default:
3034 ShouldNotReachHere();
3035 }
3036 __ mov(c_rarg3, esp); // points to jvalue on the stack
3037 // access constant pool cache entry
3038 __ load_field_entry(c_rarg2, r0);
3039 __ verify_oop(r19);
3040 // r19: object pointer copied above
3041 // c_rarg2: cache entry pointer
3042 // c_rarg3: jvalue object on the stack
3043 __ call_VM(noreg,
3044 CAST_FROM_FN_PTR(address,
3045 InterpreterRuntime::post_field_modification),
3046 r19, c_rarg2, c_rarg3);
3047
3048 switch (bytecode()) { // restore tos values
3049 case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3050 case Bytecodes::_fast_bputfield: // fall through
3051 case Bytecodes::_fast_zputfield: // fall through
3052 case Bytecodes::_fast_sputfield: // fall through
3053 case Bytecodes::_fast_cputfield: // fall through
3054 case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3055 case Bytecodes::_fast_dputfield: __ pop_d(); break;
3056 case Bytecodes::_fast_fputfield: __ pop_f(); break;
3057 case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3058 default: break;
3059 }
3060 __ bind(L2);
3061 }
3062 }
3063
3064 void TemplateTable::fast_storefield(TosState state)
3065 {
3066 transition(state, vtos);
3067
3070 jvmti_post_fast_field_mod();
3071
3072 // access constant pool cache
3073 __ load_field_entry(r2, r1);
3074
3075 // R1: field offset, R2: field holder, R3: flags
3076 load_resolved_field_entry(r2, r2, noreg, r1, r3);
3077
3078 {
3079 Label notVolatile;
3080 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3081 __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3082 __ bind(notVolatile);
3083 }
3084
3087 // Get object from stack
3088 pop_and_check_object(r2);
3089
3090 // field address
3091 const Address field(r2, r1);
3092
3093 // access field
3094 switch (bytecode()) {
3095 case Bytecodes::_fast_aputfield:
3096 do_oop_store(_masm, field, r0, IN_HEAP);
3097 break;
3098 case Bytecodes::_fast_lputfield:
3099 __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3100 break;
3101 case Bytecodes::_fast_iputfield:
3102 __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3103 break;
3104 case Bytecodes::_fast_zputfield:
3105 __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3106 break;
3107 case Bytecodes::_fast_bputfield:
3108 __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3109 break;
3110 case Bytecodes::_fast_sputfield:
3111 __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3112 break;
3113 case Bytecodes::_fast_cputfield:
3114 __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3115 break;
3116 case Bytecodes::_fast_fputfield:
3117 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3118 break;
3119 case Bytecodes::_fast_dputfield:
3120 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3121 break;
3122 default:
3123 ShouldNotReachHere();
3124 }
3125
3126 {
3127 Label notVolatile;
3128 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3129 __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3130 __ bind(notVolatile);
3131 }
3132 }
3133
3134
3135 void TemplateTable::fast_accessfield(TosState state)
3136 {
3137 transition(atos, state);
3138 // Do the JVMTI work here to avoid disturbing the register state below
3139 if (JvmtiExport::can_post_field_access()) {
3140 // Check to see if a field access watch has been set before we
3141 // take the time to call into the VM.
3142 Label L1;
3143 __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3144 __ ldrw(r2, Address(rscratch1));
3145 __ cbzw(r2, L1);
3146 // access constant pool cache entry
3147 __ load_field_entry(c_rarg2, rscratch2);
3148 __ verify_oop(r0);
3149 __ push_ptr(r0); // save object pointer before call_VM() clobbers it
3150 __ mov(c_rarg1, r0);
3151 // c_rarg1: object pointer copied above
3152 // c_rarg2: cache entry pointer
3153 __ call_VM(noreg,
3154 CAST_FROM_FN_PTR(address,
3155 InterpreterRuntime::post_field_access),
3156 c_rarg1, c_rarg2);
3157 __ pop_ptr(r0); // restore object pointer
3158 __ bind(L1);
3159 }
3160
3161 // access constant pool cache
3162 __ load_field_entry(r2, r1);
3163
3164 __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3165 __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3166
3167 // r0: object
3168 __ verify_oop(r0);
3169 __ null_check(r0);
3170 const Address field(r0, r1);
3171
3172 // 8179954: We need to make sure that the code generated for
3173 // volatile accesses forms a sequentially-consistent set of
3174 // operations when combined with STLR and LDAR. Without a leading
3175 // membar it's possible for a simple Dekker test to fail if loads
3176 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3177 // the stores in one method and we interpret the loads in another.
3178 if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3179 Label notVolatile;
3180 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3181 __ membar(MacroAssembler::AnyAny);
3182 __ bind(notVolatile);
3183 }
3184
3185 // access field
3186 switch (bytecode()) {
3187 case Bytecodes::_fast_agetfield:
3188 do_oop_load(_masm, field, r0, IN_HEAP);
3189 __ verify_oop(r0);
3190 break;
3191 case Bytecodes::_fast_lgetfield:
3192 __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3193 break;
3194 case Bytecodes::_fast_igetfield:
3195 __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3196 break;
3197 case Bytecodes::_fast_bgetfield:
3198 __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3199 break;
3200 case Bytecodes::_fast_sgetfield:
3201 __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3202 break;
3203 case Bytecodes::_fast_cgetfield:
3204 __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3205 break;
3206 case Bytecodes::_fast_fgetfield:
3207 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3208 break;
3209 case Bytecodes::_fast_dgetfield:
3210 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3211 break;
3212 default:
3213 ShouldNotReachHere();
3214 }
3215 {
3216 Label notVolatile;
3217 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3218 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3219 __ bind(notVolatile);
3220 }
3221 }
3222
3223 void TemplateTable::fast_xaccess(TosState state)
3224 {
3225 transition(vtos, state);
3226
3227 // get receiver
3228 __ ldr(r0, aaddress(0));
3229 // access constant pool cache
3230 __ load_field_entry(r2, r3, 2);
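  // the field entry index is at bcp + 2: this bytecode replaces an aload_0
  // that was followed by a fast getfield and its 2-byte index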
3231 __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3232
3233 // 8179954: We need to make sure that the code generated for
3234 // volatile accesses forms a sequentially-consistent set of
3235 // operations when combined with STLR and LDAR. Without a leading
3236 // membar it's possible for a simple Dekker test to fail if loads
3237 // use LDR;DMB but stores use STLR. This can happen if C2 compiles
3238 // the stores in one method and we interpret the loads in another.
3239 if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3240 Label notVolatile;
3241 __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3242 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3243 __ membar(MacroAssembler::AnyAny);
3244 __ bind(notVolatile);
3245 }
3246
3247 // make sure exception is reported in correct bcp range (getfield is
3248 // next instruction)
3249 __ increment(rbcp);
3250 __ null_check(r0);
3251 switch (state) {
3252 case itos:
3253 __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3254 break;
3255 case atos:
3256 do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3257 __ verify_oop(r0);
3258 break;
3259 case ftos:
3260 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3261 break;
3262 default:
3263 ShouldNotReachHere();
3264 }
3265
3266 {
3267 Label notVolatile;
3268 __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3269 __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3270 __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3271 __ bind(notVolatile);
3272 }
3273
3274 __ decrement(rbcp);
3275 }
3276
3277
3278
3279 //-----------------------------------------------------------------------------
3280 // Calls
3281
3282 void TemplateTable::prepare_invoke(Register cache, Register recv) {
3283
3284 Bytecodes::Code code = bytecode();
3285 const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
3286
3287 // save 'interpreter return address'
3288 __ save_bcp();
3289
3290 // Load TOS state for later
3291 __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
3292
3293 // load receiver if needed (note: no return address pushed yet)
3294 if (load_receiver) {
3295 __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
3296 __ add(rscratch1, esp, recv, ext::uxtx, 3);
3297 __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
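    // the receiver is the parameter deepest in the expression stack,
    // at esp + (num_parameters - 1) slots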
3298 __ verify_oop(recv);
3299 }
3300
3301 // load return address
3302 {
3303 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3304 __ mov(rscratch1, table_addr);
3305 __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3306 }
3307 }
3308
3309
3310 void TemplateTable::invokevirtual_helper(Register index,
3311 Register recv,
3312 Register flags)
3313 {
3314 // Uses temporary registers r0, r3
3315 assert_different_registers(index, recv, r0, r3);
3316 // Test for an invoke of a final method
3317 Label notFinal;
3318 __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);
3319
3320 const Register method = index; // method must be rmethod
3321 assert(method == rmethod,
3322 "Method must be rmethod for interpreter calling convention");
3323
  // do the call - the index is actually the method to call;
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3326
3327 // It's final, need a null check here!
3328 __ null_check(recv);
3329
3330 // profile this call
3331 __ profile_final_call(r0);
3332 __ profile_arguments_type(r0, method, r4, true);
3333
3334 __ jump_from_interpreted(method, r0);
3335
3336 __ bind(notFinal);
3337
3338 // get receiver klass
3339 __ load_klass(r0, recv);
3340
3341 // profile this call
3342 __ profile_virtual_call(r0, rlocals, r3);
3343
3344 // get target Method & entry point
3345 __ lookup_virtual_method(r0, index, method);
3346 __ profile_arguments_type(r3, method, r4, true);
3347 // FIXME -- this looks completely redundant. is it?
3348 // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3349 __ jump_from_interpreted(method, r3);
3350 }
3351
3352 void TemplateTable::invokevirtual(int byte_no)
3353 {
3354 transition(vtos, vtos);
3355 assert(byte_no == f2_byte, "use this argument");
3356
3357 load_resolved_method_entry_virtual(r2, // ResolvedMethodEntry*
3358 rmethod, // Method* or itable index
3359 r3); // flags
3360 prepare_invoke(r2, r2); // recv
3361
3362 // rmethod: index (actually a Method*)
3363 // r2: receiver
3364 // r3: flags
3365
3366 invokevirtual_helper(rmethod, r2, r3);
3367 }
3368
3369 void TemplateTable::invokespecial(int byte_no)
3370 {
3371 transition(vtos, vtos);
3372 assert(byte_no == f1_byte, "use this argument");
3373
3374 load_resolved_method_entry_special_or_static(r2, // ResolvedMethodEntry*
3375 rmethod, // Method*
3376 r3); // flags
3377 prepare_invoke(r2, r2); // get receiver also for null check
3378 __ verify_oop(r2);
3379 __ null_check(r2);
3380 // do the call
3381 __ profile_call(r0);
3382 __ profile_arguments_type(r0, rmethod, rbcp, false);
3383 __ jump_from_interpreted(rmethod, r0);
3384 }
3385
3386 void TemplateTable::invokestatic(int byte_no)
3387 {
3388 transition(vtos, vtos);
3389 assert(byte_no == f1_byte, "use this argument");
3390
3391 load_resolved_method_entry_special_or_static(r2, // ResolvedMethodEntry*
3392 rmethod, // Method*
3393 r3); // flags
3394 prepare_invoke(r2, r2); // get receiver also for null check
3395
3396 // do the call
3397 __ profile_call(r0);
3398 __ profile_arguments_type(r0, rmethod, r4, false);
3399 __ jump_from_interpreted(rmethod, r0);
3400 }
3401
3402 void TemplateTable::fast_invokevfinal(int byte_no)
3403 {
3404 __ call_Unimplemented();
3405 }
3406
3407 void TemplateTable::invokeinterface(int byte_no) {
3408 transition(vtos, vtos);
3409 assert(byte_no == f1_byte, "use this argument");
3410
3411 load_resolved_method_entry_interface(r2, // ResolvedMethodEntry*
3412 r0, // Klass*
3413 rmethod, // Method* or itable/vtable index
3414 r3); // flags
3415 prepare_invoke(r2, r2); // receiver
3416
3417 // r0: interface klass (from f1)
3418 // rmethod: method (from f2)
3419 // r2: receiver
3420 // r3: flags
3421
3422 // First check for Object case, then private interface method,
3423 // then regular interface method.
3424
3425 // Special case of invokeinterface called for virtual method of
3426 // java.lang.Object. See cpCache.cpp for details.
3427 Label notObjectMethod;
3428 __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);
3429
3430 invokevirtual_helper(rmethod, r2, r3);
3431 __ bind(notObjectMethod);
3432
3433 Label no_such_interface;
3434
3435 // Check for private method invocation - indicated by vfinal
3436 Label notVFinal;
3437 __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);
3438
3439 // Get receiver klass into r3
3440 __ load_klass(r3, r2);
3441
3442 Label subtype;
3443 __ check_klass_subtype(r3, r0, r4, subtype);
3444 // If we get here the typecheck failed
3445 __ b(no_such_interface);
3446 __ bind(subtype);
3447
3448 __ profile_final_call(r0);
3449 __ profile_arguments_type(r0, rmethod, r4, true);
3450 __ jump_from_interpreted(rmethod, r0);
3451
3452 __ bind(notVFinal);
3453
3454 // Get receiver klass into r3
3455 __ restore_locals();
3456 __ load_klass(r3, r2);
3457
3458 Label no_such_method;
3459
3460 // Preserve method for throw_AbstractMethodErrorVerbose.
3461 __ mov(r16, rmethod);
3462 // Receiver subtype check against REFC.
3463 // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3464 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3465 r3, r0, noreg,
3466 // outputs: scan temp. reg, scan temp. reg
3467 rscratch2, r13,
3468 no_such_interface,
3469 /*return_method=*/false);
3470
3471 // profile this call
3472 __ profile_virtual_call(r3, r13, r19);
3473
3474 // Get declaring interface class from method, and itable index
3475
3476 __ load_method_holder(r0, rmethod);
3477 __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3478 __ subw(rmethod, rmethod, Method::itable_index_max);
3479 __ negw(rmethod, rmethod);
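  // the Method* stores the itable index encoded as (itable_index_max - index);
  // the subtract-and-negate above decodes the original index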
3480
3481 // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3482 __ mov(rlocals, r3);
3483 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3484 rlocals, r0, rmethod,
3485 // outputs: method, scan temp. reg
3486 rmethod, r13,
3487 no_such_interface);
3488
  // rmethod: Method to call
3490 // r2: receiver
3491 // Check for abstract method error
3492 // Note: This should be done more efficiently via a throw_abstract_method_error
3493 // interpreter entry point and a conditional jump to it in case of a null
3494 // method.
3495 __ cbz(rmethod, no_such_method);
3496
3497 __ profile_arguments_type(r3, rmethod, r13, true);
3498
3499 // do the call
3500 // r2: receiver
  // rmethod: Method
3502 __ jump_from_interpreted(rmethod, r3);
3503 __ should_not_reach_here();
3504
3505 // exception handling code follows...
3506 // note: must restore interpreter registers to canonical
3507 // state for exception handling to work correctly!
3508
3509 __ bind(no_such_method);
3510 // throw exception
3511 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3512 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3513 // Pass arguments for generating a verbose error message.
3514 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3515 // the call_VM checks for exception, so we should never return here.
3516 __ should_not_reach_here();
3517
3518 __ bind(no_such_interface);
3519 // throw exception
3520 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
3521 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3522 // Pass arguments for generating a verbose error message.
3523 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3524 InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3525 // the call_VM checks for exception, so we should never return here.
3526 __ should_not_reach_here();
3527 return;
3528 }
3529
3530 void TemplateTable::invokehandle(int byte_no) {
3531 transition(vtos, vtos);
3532 assert(byte_no == f1_byte, "use this argument");
3533
3534 load_resolved_method_entry_handle(r2, // ResolvedMethodEntry*
3535 rmethod, // Method*
3536 r0, // Resolved reference
3537 r3); // flags
3538 prepare_invoke(r2, r2);
3539
3540 __ verify_method_ptr(r2);
3541 __ verify_oop(r2);
3542 __ null_check(r2);
3543
3544 // FIXME: profile the LambdaForm also
3545
3546 // r13 is safe to use here as a scratch reg because it is about to
3547 // be clobbered by jump_from_interpreted().
3548 __ profile_final_call(r13);
3549 __ profile_arguments_type(r13, rmethod, r4, true);
3550
3551 __ jump_from_interpreted(rmethod, r0);
3552 }
3553
3554 void TemplateTable::invokedynamic(int byte_no) {
3555 transition(vtos, vtos);
3556 assert(byte_no == f1_byte, "use this argument");
3557
3558 load_invokedynamic_entry(rmethod);
3559
3560 // r0: CallSite object (from cpool->resolved_references[])
3561 // rmethod: MH.linkToCallSite method
3562
  // Note: the appendix (CallSite, in r0) has already been pushed by load_invokedynamic_entry
3564
3565 // %%% should make a type profile for any invokedynamic that takes a ref argument
3566 // profile this call
3567 __ profile_call(rbcp);
3568 __ profile_arguments_type(r3, rmethod, r13, false);
3569
3570 __ verify_oop(r0);
3571
3572 __ jump_from_interpreted(rmethod, r0);
3573 }
3574
3575
3576 //-----------------------------------------------------------------------------
3577 // Allocation
3578
3579 void TemplateTable::_new() {
3580 transition(vtos, atos);
3581
3582 __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3583 Label slow_case;
3584 Label done;
3585 Label initialize_header;
3586
3587 __ get_cpool_and_tags(r4, r0);
3588 // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
  __ lea(rscratch1, Address(rscratch1, tags_offset));
  __ ldarb(rscratch1, rscratch1);
  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

  // make sure klass is initialized
  assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
  __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ ldrw(r3,
          Address(r4,
                  Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.
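  //
  // Roughly, the fast path below behaves like this illustrative C-style
  // sketch (obj, size and klass stand in for r0, r3 and r4; none of these
  // are real variables):
  //
  //   if (UseTLAB) {
  //     if (tlab_top + size > tlab_end) goto slow_case;
  //     obj = tlab_top; tlab_top += size;          // bump the TLAB pointer
  //     if (!ZeroTLAB)                             // TLAB may be pre-zeroed
  //       memset(obj + sizeof(oopDesc), 0, size - sizeof(oopDesc));
  //     obj->mark  = markWord::prototype();        // install header
  //     obj->klass = klass;                        // store klass last
  //   } else {
  //     goto slow_case;
  //   }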

  if (UseTLAB) {
    __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);

    if (ZeroTLAB) {
      // the fields have already been cleared
      __ b(initialize_header);
    }

    // The object's fields are initialized before its header. If the size of
    // the fields (instance size minus the header size) is zero, go directly
    // to the header initialization.
    __ sub(r3, r3, sizeof(oopDesc));
    __ cbz(r3, initialize_header);

    // Initialize object fields
    {
      __ add(r2, r0, sizeof(oopDesc));
      Label loop;
      __ bind(loop);
      __ str(zr, Address(__ post(r2, BytesPerLong)));
      __ sub(r3, r3, BytesPerLong);
      __ cbnz(r3, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);
    __ mov(rscratch1, (intptr_t)markWord::prototype().value());
    __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ store_klass_gap(r0, zr); // zero klass gap for compressed oops
    __ store_klass(r0, r4);     // store klass last

    if (DTraceAllocProbes) {
      // Trigger dtrace event for fastpath
      __ push(atos); // save the return value
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), r0);
      __ pop(atos); // restore the return value
    }
    __ b(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast()
{
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check. Blows r2, r5. Object in r3.
  // Superklass in r0. Subklass in r19.
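  // In effect (illustrative): branch to ok_is_subtype iff the object's klass
  // (r19) is a subtype of the resolved klass (r0); otherwise fall through to
  // the failure path below and throw ClassCastException.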
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3 into r0

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ verify_oop(r3);
  __ load_klass(r3, r3);
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);

  // Generate subtype check. Blows r2, r5
  // Superklass in r0. Subklass in r3.
  __ gen_subtype_check(r3, ok_is_subtype);

  // Come here on failure
  __ mov(r0, 0);
  __ b(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, 1);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
  // r0 = 0: obj == nullptr or  obj is not an instanceof the specified klass
  // r0 = 1: obj != nullptr and obj is an instanceof the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, rbcp);
  __ mov(r19, r0);

  // post the breakpoint event
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rmethod, rbcp);

  // complete the execution of original bytecode
  __ mov(rscratch1, r19);
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(r0);
  __ b(Interpreter::throw_exception_entry());
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the structure of the assembly code as well.
//
// Stack layout:
//
// [expressions  ] <--- esp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rfp    ] <--- rfp
void TemplateTable::monitorenter()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label allocated;

  // initialize entry pointer
  __ mov(c_rarg1, zr); // points to free slot or null

  // find a free slot in the monitor block (result in c_rarg1)
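  // In effect (illustrative C-style sketch; cur and free stand in for
  // c_rarg3 and c_rarg1):
  //
  //   free = nullptr;
  //   for (cur = top; cur != bottom; cur += entry_size) {
  //     if (cur->obj == nullptr) free = cur;  // remember an unused slot
  //     if (cur->obj == obj) break;           // stop at entry for same object
  //   }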
  {
    Label entry, loop, exit;
    __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
    __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg3 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom

    __ b(entry);

    __ bind(loop);
    // check if current entry is used
    // if not used then remember entry in c_rarg1
    __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
    __ cmp(zr, rscratch1);
    __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
    // check if current entry is for same object
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, exit);
    // otherwise advance to next entry
    __ add(c_rarg3, c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
    __ bind(exit);
  }

  __ cbnz(c_rarg1, allocated); // check if a slot has been found;
                               // if found, continue with that one

  // allocate one if there's no free slot
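  // The expression stack (everything between esp and the monitor block
  // bottom) is slid down by one monitor entry; the vacated words at the new
  // block bottom become the new monitor. Illustrative sketch:
  //
  //   sp  -= entry_size;                       // room on the machine stack
  //   esp -= entry_size;                       // new expression stack top
  //   new_bottom = old_bottom - entry_size;    // new expression stack bottom
  //   for (p = esp; p != new_bottom; p += wordSize)
  //     *p = *(p + entry_size);                // copy stack words down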
  {
    Label entry, loop;
    // 1. compute new pointers            // rsp: old expression stack top

    __ check_extended_sp();
    __ sub(sp, sp, entry_size);           // make room for the monitor
    __ sub(rscratch1, sp, rfp);
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));

    __ ldr(c_rarg1, monitor_block_bot);   // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to the old expression stack bottom

    __ sub(esp, esp, entry_size);         // move expression stack top
    __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
    __ mov(c_rarg3, esp);                 // set start value for copy loop
    __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, monitor_block_bot); // set new monitor block bottom

    __ b(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                   // word from old location
    __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
    __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
    __ bind(entry);
    __ cmp(c_rarg3, c_rarg1);   // check if bottom reached
    __ br(Assembler::NE, loop); // if not at bottom then
                                // copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label found;

  // find matching slot
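  // In effect (illustrative; cur stands in for c_rarg1):
  //
  //   for (cur = top; cur != bottom; cur += entry_size)
  //     if (cur->obj == obj) goto found;
  //   // fall through: no matching entry, unlocking was not block-structured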
  {
    Label entry, loop;
    __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block
    __ b(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, found);
    // otherwise advance to next entry
    __ add(c_rarg1, c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
  }

  // Error handling: unlocking was not block-structured.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(r0);  // discard object
}


// Wide instructions
void TemplateTable::wide()
{
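  // Dispatch through the table of wide-variant entry points; roughly:
  //   target = Interpreter::_wentry_point[*(bcp + 1)]; goto *target;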
  __ load_unsigned_byte(r19, at_bcp(1));
  __ mov(rscratch1, (address)Interpreter::_wentry_point);
  __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
  __ br(rscratch1);
}


// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
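  // (e.g. with ndims == 3 the dimensions occupy three stack slots and
  // first_addr = esp + 3*wordSize - wordSize = esp + 2*wordSize)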
  __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
  __ sub(c_rarg1, c_rarg1, wordSize);
  call_VM(r0,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
  __ load_unsigned_byte(r1, at_bcp(3));
  __ lea(esp, Address(esp, r1, Address::uxtw(3)));
}