/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Global Register Names
static const Register rbcp    = r13;
static const Register rlocals = r14;

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

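// Note on the layout assumed by the helpers above: interpreter locals live
// below rlocals and grow toward lower addresses, so slot n sits at a negative
// offset from rlocals, and register indices are negated in locals_index()
// before being used with the scaled-index forms. Two-slot values (long,
// double) occupy slots n and n+1 with the value addressed through the
// higher-numbered slot, which is why laddress(n) is iaddress(n + 1).
// A minimal sketch, assuming 8-byte stack slots (wordSize == 8 on x86-64):
//
//   slot 0               -> rlocals -  0   (iaddress(0))
//   slot 1               -> rlocals -  8   (iaddress(1))
//   long in slots 1 and 2 -> rlocals - 16  (laddress(1) == iaddress(2))
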
// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp().
// It isn't for category 1 values.
static inline Address at_tos() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}


// Miscellaneous helper routines
// Store an oop (or null) at the address described by dst.
// If val == noreg, a null is stored.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  __ store_heap_oop(dst, val, rscratch2, r9, r8, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }

      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}

// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one), rscratch1);
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two), rscratch1);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one), rscratch1);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
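
// Worked example for the sipush sequence above (a sketch of the bit
// manipulation, not generated code): the operand is a big-endian 16-bit
// immediate at bcp+1. For the Java value -2 the bytecode stream holds
// 0xFF 0xFE. The little-endian 16-bit load yields rax = 0x0000FEFF, bswapl
// reverses all four bytes to 0xFFFE0000, and the arithmetic right shift by
// 16 both repositions and sign-extends the value: 0xFFFFFFFE == -2.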

void TemplateTable::ldc(LdcType type) {
  transition(vtos, vtos);
  Register rarg = c_rarg1;
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, is_ldc_wide(type) ? 1 : 0);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = c_rarg1;
  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ resolve_oop_handle(tmp, rscratch2);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = c_rarg1;
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
  __ get_vm_result_metadata(flags);
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCache::field_index_mask);
  const Address field(obj, off, Address::times_1, 0*wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCache::tos_state_shift);
  __ andl(flags, ConstantPoolCache::tos_state_mask);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jccb(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jccb(Assembler::notEqual, notFloat);
      // ftos
      __ movflt(xmm0, field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jccb(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jccb(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jccb(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jccb(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jccb(Assembler::notEqual, notLong);
      // ltos
      // (On x86-64 a single movptr loads the full 64-bit value; the old
      // 32-bit high-word load-ordering concern no longer applies.)
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jccb(Assembler::notEqual, notDouble);
      // dtos
      __ movdbl(xmm0, field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
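
// A hedged sketch of the address arithmetic, assuming 8-byte slots on
// x86-64: for the bytecode "iload 3", locals_index() loads 3 and negates it
// to -3, so iaddress(rbx) = Address(rlocals, rbx, Address::times_ptr)
// resolves to rlocals + (-3) * 8 = rlocals - 24, i.e. local slot 3 below
// rlocals. The negation is what lets a single scaled-index addressing mode
// walk the downward-growing locals area.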

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to _fast_iload2. We only want to rewrite
    // the last two iloads in a pair. If the next bytecode is _fast_iload,
    // the current and next bytecodes form an iload pair, so rewrite the
    // current one to _fast_iload2.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
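
// Same byte-swap idiom as in sipush above, but with a logical (shrl) rather
// than an arithmetic (sarl) right shift: the wide local index is an unsigned
// 16-bit operand, so it must be zero-extended, not sign-extended, before the
// negation.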

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  Label skip;
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(c_rarg1, array);
  __ jump(RuntimeAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}
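
// The single unsigned comparison above implements the full bounds check
// 0 <= index < length: a negative index, compared as an unsigned value, is
// larger than any possible array length, so "below" is taken only for
// indices that are both non-negative and within bounds. For example,
// index -1 compares as 0xFFFFFFFF and fails against any length.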

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  // rbx: index (copied there by index_check)
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  do_oop_load(_masm,
              Address(rdx, rax,
                      UseCompressedOops ? Address::times_4 : Address::times_ptr,
                      arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
              rax,
              IS_ARRAY);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These simple bytecodes are the most profitable to rewrite.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do the actual aload_0 (must do this after patch_bytecode, which might
  // call the VM, where a GC could move the oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: value (on x86-64 the whole long is in rax)
  // rbx: index
  // rcx: array
  index_check(rcx, rbx); // prefer index in rbx
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in xmm0
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in xmm0
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx); // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax, rscratch1);
  // Move superklass into rax
  __ load_klass(rax, rdx, rscratch1);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(RuntimeAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);

  // Have a null in rax, rdx=array, rcx=index. Store null at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether the array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(rcx, rdx, rscratch1);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg, noreg);
}
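
// A note on the diffbit test above (a sketch of the invariant it relies on,
// not new behavior): Klass::_layout_helper encodes the array element type,
// and Klass::layout_helper_boolean_diffbit() returns a bit that is set in
// the layout helper of T_BOOLEAN arrays but clear for T_BYTE arrays, so a
// single testl distinguishes boolean[] (store masked to 0/1) from byte[]
// (store as-is).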

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);   // load a
  __ push_ptr(rax);      // push a
  __ load_ptr(1, rax);   // load b
  __ push_ptr(rax);      // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
  default   : ShouldNotReachHere();
  }
}
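
// Why the operand shuffling above differs per case: addition and the logical
// ops are commutative, so the second operand can be popped straight into rdx.
// Subtraction and the shifts are not; the value on TOS is the right-hand
// operand, so it is saved first and the left-hand operand popped into rax.
// The shift count lands in rcx because the x86 variable-count shift forms
// (shll/sarl/shrl without an immediate) take their count implicitly in CL.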

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  : __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  : __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_long). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_long). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    // On x86_64 platforms the SharedRuntime::frem method is called to perform the
    // modulo operation. The frem method calls the function
    // double fmod(double x, double y) in math.h. The documentation of fmod states:
    // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
    // (signalling or quiet) is returned.
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    // Similar to fop2(), the modulo operation is performed using the
    // SharedRuntime::drem method on x86_64 platforms for the same reasons
    // as mentioned in fop2().
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
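
// Why masking &pool[1] is safe, sketched under the assumption that a static
// jlong array is at least 8-byte aligned: each pool below holds four jlongs
// (32 bytes). With adr = &pool[1] = base + 8, rounding down with ~0xF yields
// either base (when base is 16-byte aligned) or base + 8 (when it is only
// 8-byte aligned). Both results are 16-byte aligned and leave room for the
// two jlongs written, so the mask never escapes the pool.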

// Buffers for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
  __ xorps(xmm0, ExternalAddress((address) float_signflip), rscratch1);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip =
    double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
  __ xorpd(xmm0, ExternalAddress((address) double_signflip), rscratch1);
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);          // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  // the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
    {
      Label L;
      __ cvttss2sil(rax, xmm0);
      __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_f2l:
    {
      Label L;
      __ cvttss2siq(rax, xmm0);
      // NaN or overflow/underflow?
      __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
    {
      Label L;
      __ cvttsd2sil(rax, xmm0);
      __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_d2l:
    {
      Label L;
      __ cvttsd2siq(rax, xmm0);
      // NaN or overflow/underflow?
      __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
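
// Worked example of the -1/0/+1 computation above: the first operand (rdx)
// is popped and compared against TOS (rax); "less" keeps the preloaded -1.
// Otherwise setb(notEqual) writes 1 to the low byte when the values differ
// and 0 when they are equal, and movzbl zero-extends that byte, yielding
// exactly the results lcmp requires: +1, 0, or -1.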

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}
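
// Unordered handling above: ucomiss/ucomisd set the parity flag when either
// operand is NaN. unordered_result < 0 corresponds to fcmpl (NaN compares as
// -1, keeping the preloaded value via the parity jump), while the positive
// case corresponds to fcmpg (NaN compares as +1), matching the JVM spec's
// two float-comparison bytecodes.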

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);           // rcx holds method
  __ profile_taken_branch(rax); // rax holds updated MDP

  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // Load up rdx with the branch displacement
  if (is_wide) {
    __ movl(rdx, at_bcp(1));
  } else {
    __ load_signed_short(rdx, at_bcp(1));
  }
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(rbcp, rdx);
    // jsr returns atos, but the return bci is not an oop, so push it as an int
    __ push_i(rax);
    __ dispatch_only(vtos, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(rbcp, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // check if MethodCounters exists
    Label has_counters;
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, has_counters);
    __ push(rdx);
    __ push(rcx);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
               rcx);
    __ pop(rcx);
    __ pop(rdx);
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, dispatch);
    __ bind(has_counters);

    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
      __ testptr(rbx, rbx);
      __ jccb(Assembler::zero, no_mdo);
      // Increment the MDO backedge counter
      const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
      __ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
                                 UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
      __ jmp(dispatch);
    }
    __ bind(no_mdo);
    // Increment backedge counter in MethodCounters*
    __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
    const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
    __ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
                               UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(rbcp, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos, true);

  if (UseLoopCounter) {
    if (UseOnStackReplacement) {
      Label set_mdp;
      // backedge counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rbcp); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);

      // rax: osr nmethod (osr ok) or null (osr not possible)
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
      __ jcc(Assembler::notEqual, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // In preparation of invoking it, first we must migrate the locals
      // and monitors from off the interpreter frame on the stack.
      // Save the osr nmethod across the migration call; it will be
      // preserved in rbx.
      __ mov(rbx, rax);

      JFR_ONLY(__ enter_jfr_critical_section();)

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // rax is the OSR buffer; move it to the expected parameter location
      __ mov(j_rarg0, rax);
      // We use the j_rarg definitions here to avoid register conflicts:
      // parameter registers vary across platforms, and we are in the midst
      // of a calling sequence to the OSR nmethod, so we must not collide
      // with them. These are NOT parameters.
      const Register retaddr   = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();              // remove frame anchor
      JFR_ONLY(__ leave_jfr_critical_section();)
      __ pop(retaddr);         // get return address
      __ mov(rsp, sender_sp);  // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpoop(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}
1902
1903 void TemplateTable::ret() {
1904 transition(vtos, vtos);
1905 locals_index(rbx);
1906 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
1907 __ profile_ret(rbx, rcx);
1908 __ get_method(rax);
1909 __ movptr(rbcp, Address(rax, Method::const_offset()));
1910 __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
1911 ConstMethod::codes_offset()));
1912 __ dispatch_next(vtos, 0, true);
1913 }
1914
1915 void TemplateTable::wide_ret() {
1916 transition(vtos, vtos);
1917 locals_index_wide(rbx);
1918 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
1919 __ profile_ret(rbx, rcx);
1920 __ get_method(rax);
1921 __ movptr(rbcp, Address(rax, Method::const_offset()));
1922 __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
1923 __ dispatch_next(vtos, 0, true);
1924 }
1925
1926 void TemplateTable::tableswitch() {
1927 Label default_case, continue_execution;
1928 transition(itos, vtos);
1929
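  // Operand layout of tableswitch (per the JVM spec), after padding the bcp
  // to the next 4-byte boundary; all values are big-endian s4:
  //   [aligned + 0]               default offset
  //   [aligned + BytesPerInt]     lo
  //   [aligned + 2 * BytesPerInt] hi
  //   [aligned + 3 * BytesPerInt] jump offsets[hi - lo + 1]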
  // compute the 4-byte-aligned address of the switch operands in rbx
1931 __ lea(rbx, at_bcp(BytesPerInt));
1932 __ andptr(rbx, -BytesPerInt);
1933 // load lo & hi
1934 __ movl(rcx, Address(rbx, BytesPerInt));
1935 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
1936 __ bswapl(rcx);
1937 __ bswapl(rdx);
1938 // check against lo & hi
1939 __ cmpl(rax, rcx);
1940 __ jcc(Assembler::less, default_case);
1941 __ cmpl(rax, rdx);
1942 __ jcc(Assembler::greater, default_case);
1943 // lookup dispatch offset
1944 __ subl(rax, rcx);
1945 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1946 __ profile_switch_case(rax, rbx, rcx);
1947 // continue execution
1948 __ bind(continue_execution);
1949 __ bswapl(rdx);
1950 __ movl2ptr(rdx, rdx);
1951 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
1952 __ addptr(rbcp, rdx);
1953 __ dispatch_only(vtos, true);
1954 // handle default
1955 __ bind(default_case);
1956 __ profile_switch_default(rax);
1957 __ movl(rdx, Address(rbx, 0));
1958 __ jmp(continue_execution);
1959 }
1960
1961 void TemplateTable::lookupswitch() {
1962 transition(itos, itos);
1963 __ stop("lookupswitch bytecode should have been rewritten");
1964 }
1965
1966 void TemplateTable::fast_linearswitch() {
1967 transition(itos, vtos);
1968 Label loop_entry, loop, found, continue_execution;
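  // Operand layout of lookupswitch (per the JVM spec), after padding the bcp
  // to the next 4-byte boundary; all values are big-endian s4:
  //   [aligned + 0]                         default offset
  //   [aligned + BytesPerInt]               npairs
  //   [aligned + (2 + 2*i) * BytesPerInt]   match[i]   (sorted, i < npairs)
  //   [aligned + (3 + 2*i) * BytesPerInt]   offset[i]
  // The linear search below scans the pairs from last to first.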
1969 // bswap rax so we can avoid bswapping the table entries
1970 __ bswapl(rax);
  // compute the 4-byte-aligned address of the switch operands in rbx
1972 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1973 // this instruction (change offsets
1974 // below)
1975 __ andptr(rbx, -BytesPerInt);
1976 // set counter
1977 __ movl(rcx, Address(rbx, BytesPerInt));
1978 __ bswapl(rcx);
1979 __ jmpb(loop_entry);
1980 // table search
1981 __ bind(loop);
1982 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1983 __ jcc(Assembler::equal, found);
1984 __ bind(loop_entry);
1985 __ decrementl(rcx);
1986 __ jcc(Assembler::greaterEqual, loop);
1987 // default case
1988 __ profile_switch_default(rax);
1989 __ movl(rdx, Address(rbx, 0));
1990 __ jmp(continue_execution);
1991 // entry found -> get offset
1992 __ bind(found);
1993 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1994 __ profile_switch_case(rcx, rax, rbx);
1995 // continue execution
1996 __ bind(continue_execution);
1997 __ bswapl(rdx);
1998 __ movl2ptr(rdx, rdx);
1999 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2000 __ addptr(rbcp, rdx);
2001 __ dispatch_only(vtos, true);
2002 }
2003
2004 void TemplateTable::fast_binaryswitch() {
2005 transition(itos, vtos);
2006 // Implementation using the following core algorithm:
2007 //
2008 // int binary_search(int key, LookupswitchPair* array, int n) {
2009 // // Binary search according to "Methodik des Programmierens" by
2010 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2011 // int i = 0;
2012 // int j = n;
2013 // while (i+1 < j) {
2014 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2015 // // with Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
2018 // int h = (i + j) >> 1;
2019 // // i < h < j
2020 // if (key < array[h].fast_match()) {
2021 // j = h;
2022 // } else {
2023 // i = h;
2024 // }
2025 // }
2026 // // R: a[i] <= key < a[i+1] or Q
2027 // // (i.e., if key is within array, i is the correct index)
2028 // return i;
2029 // }
2030
2031 // Register allocation
2032 const Register key = rax; // already set (tosca)
2033 const Register array = rbx;
2034 const Register i = rcx;
2035 const Register j = rdx;
2036 const Register h = rdi;
2037 const Register temp = rsi;
2038
2039 // Find array start
2040 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2041 // get rid of this
2042 // instruction (change
2043 // offsets below)
2044 __ andptr(array, -BytesPerInt);
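  // array now points at the sorted (match, offset) pairs; npairs sits at
  // array[-BytesPerInt] and the default offset at array[-2 * BytesPerInt],
  // all still in big-endian byte order.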
2045
2046 // Initialize i & j
2047 __ xorl(i, i); // i = 0;
2048 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2049
  // Convert j into native byte ordering
2051 __ bswapl(j);
2052
2053 // And start
2054 Label entry;
2055 __ jmp(entry);
2056
2057 // binary search loop
2058 {
2059 Label loop;
2060 __ bind(loop);
2061 // int h = (i + j) >> 1;
2062 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2063 __ sarl(h, 1); // h = (i + j) >> 1;
2064 // if (key < array[h].fast_match()) {
2065 // j = h;
2066 // } else {
2067 // i = h;
2068 // }
2069 // Convert array[h].match to native byte-ordering before compare
2070 __ movl(temp, Address(array, h, Address::times_8));
2071 __ bswapl(temp);
2072 __ cmpl(key, temp);
2073 // j = h if (key < array[h].fast_match())
2074 __ cmov32(Assembler::less, j, h);
2075 // i = h if (key >= array[h].fast_match())
2076 __ cmov32(Assembler::greaterEqual, i, h);
2077 // while (i+1 < j)
2078 __ bind(entry);
2079 __ leal(h, Address(i, 1)); // i+1
2080 __ cmpl(h, j); // i+1 < j
2081 __ jcc(Assembler::less, loop);
2082 }
2083
2084 // end of binary search, result index is i (must check again!)
2085 Label default_case;
2086 // Convert array[i].match to native byte-ordering before compare
2087 __ movl(temp, Address(array, i, Address::times_8));
2088 __ bswapl(temp);
2089 __ cmpl(key, temp);
2090 __ jcc(Assembler::notEqual, default_case);
2091
2092 // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2094 __ profile_switch_case(i, key, array);
2095 __ bswapl(j);
2096 __ movslq(j, j);
2097
2098 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2099 __ addptr(rbcp, j);
2100 __ dispatch_only(vtos, true);
2101
2102 // default case -> j = default offset
2103 __ bind(default_case);
2104 __ profile_switch_default(i);
2105 __ movl(j, Address(array, -2 * BytesPerInt));
2106 __ bswapl(j);
2107 __ movslq(j, j);
2108
2109 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2110 __ addptr(rbcp, j);
2111 __ dispatch_only(vtos, true);
2112 }
2113
2114 void TemplateTable::_return(TosState state) {
2115 transition(state, state);
2116
2117 assert(_desc->calls_vm(),
2118 "inconsistent calls_vm information"); // call in remove_activation
2119
2120 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2121 assert(state == vtos, "only valid state");
2122 Register robj = c_rarg1;
2123 __ movptr(robj, aaddress(0));
2124 __ load_klass(rdi, robj, rscratch1);
2125 __ testb(Address(rdi, Klass::misc_flags_offset()), KlassFlags::_misc_has_finalizer);
2126 Label skip_register_finalizer;
2127 __ jcc(Assembler::zero, skip_register_finalizer);
2128
2129 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2130
2131 __ bind(skip_register_finalizer);
2132 }
2133
2134 if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2135 Label no_safepoint;
2136 NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2137 __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2138 __ jcc(Assembler::zero, no_safepoint);
2139 __ push(state);
2140 __ push_cont_fastpath();
2141 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2142 InterpreterRuntime::at_safepoint));
2143 __ pop_cont_fastpath();
2144 __ pop(state);
2145 __ bind(no_safepoint);
2146 }
2147
2148 // Narrow result if state is itos but result type is smaller.
2149 // Need to narrow in the return bytecode rather than in generate_return_entry
2150 // since compiled code callers expect the result to already be narrowed.
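  // For example, a method declared to return boolean must deliver exactly
  // 0 or 1 in rax; roughly, narrow() masks or sign-extends rax according
  // to the method's return type.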
2151 if (state == itos) {
2152 __ narrow(rax);
2153 }
2154 __ remove_activation(state, rbcp);
2155
2156 __ jmp(rbcp);
2157 }
2158
2159 // ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
2161 // in order. Store buffers on most chips allow reads & writes to
2162 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2163 // without some kind of memory barrier (i.e., it's not sufficient that
2164 // the interpreter does not reorder volatile references, the hardware
2165 // also must not reorder them).
2166 //
2167 // According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.  ALSO reads &
2169 // writes act as acquire & release, so:
2170 // (2) A read cannot let unrelated NON-volatile memory refs that
2171 // happen after the read float up to before the read. It's OK for
2172 // non-volatile memory refs that happen before the volatile read to
2173 // float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2175 // memory refs that happen BEFORE the write float down to after the
2176 // write. It's OK for non-volatile memory refs that happen after the
2177 // volatile write to float up before it.
2178 //
2179 // We only put in barriers around volatile refs (they are expensive),
2180 // not _between_ memory refs (that would require us to track the
2181 // flavor of the previous memory refs). Requirements (2) and (3)
2182 // require some barriers before volatile stores and after volatile
2183 // loads. These nearly cover requirement (1) but miss the
2184 // volatile-store-volatile-load case. This final case is placed after
2185 // volatile-stores although it could just as well go before
2186 // volatile-loads.
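//
// For example, on x86 (a TSO architecture) only the StoreLoad case needs a
// real instruction; membar typically emits an idempotent locked operation
// such as
//
//   lock addl $0, -N(rsp)   ; StoreLoad barrier (a rough sketch; the exact
//                           ; encoding is up to MacroAssembler::membar)
//
// after a volatile store, while volatile loads need no explicit fence since
// TSO already forbids Load-Load and Load-Store reordering.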
2187
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to emit a memory barrier for volatile accesses
2190 __ membar(order_constraint);
2191 }
2192
2193 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2194 Register cache,
2195 Register index) {
2196 const Register temp = rbx;
2197 assert_different_registers(cache, index, temp);
2198
2199 Label L_clinit_barrier_slow, L_done;
2200
2201 Bytecodes::Code code = bytecode();
2202
2203 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2204
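  // What the generated code below does, roughly (pseudo-code; accessor names
  // are illustrative):
  //   entry = cpCache->resolved_method_entry(index);
  //   if (entry->bytecode1/2() != code ||
  //       (invokestatic && !holder->is_initialized())) {
  //     InterpreterRuntime::resolve_from_cache(code);   // slow path, may block
  //     entry = cpCache->resolved_method_entry(index);  // reload
  //   }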
2205 __ load_method_entry(cache, index);
2206 switch(byte_no) {
2207 case f1_byte:
2208 __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2209 break;
2210 case f2_byte:
2211 __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2212 break;
2213 default:
2214 ShouldNotReachHere();
2215 }
2216 __ cmpl(temp, code); // have we resolved this bytecode?
2217
2218 // Class initialization barrier for static methods
2219 if (bytecode() == Bytecodes::_invokestatic) {
2220 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2221 const Register method = temp;
2222 const Register klass = temp;
2223
2224 __ jcc(Assembler::notEqual, L_clinit_barrier_slow);
2225 __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2226 __ load_method_holder(klass, method);
2227 __ clinit_barrier(klass, &L_done, /*L_slow_path*/ nullptr);
2228 __ bind(L_clinit_barrier_slow);
2229 } else {
2230 __ jcc(Assembler::equal, L_done);
2231 }
2232
2233 // resolve first time through
2234 // Class initialization barrier slow path lands here as well.
2235 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2236 __ movl(temp, code);
2237 __ call_VM_preemptable(noreg, entry, temp);
2238 // Update registers with resolved info
2239 __ load_method_entry(cache, index);
2240 __ bind(L_done);
2241 }
2242
2243 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2244 Register cache,
2245 Register index) {
2246 const Register temp = rbx;
2247 assert_different_registers(cache, index, temp);
2248
2249 Label L_clinit_barrier_slow, L_done;
2250
2251 Bytecodes::Code code = bytecode();
2252 switch (code) {
2253 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2254 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2255 default: break;
2256 }
2257
2258 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2259 __ load_field_entry(cache, index);
2260 if (byte_no == f1_byte) {
2261 __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2262 } else {
2263 __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2264 }
2265 __ cmpl(temp, code); // have we resolved this bytecode?
2266
2267 // Class initialization barrier for static fields
2268 if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
2269 assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2270 const Register field_holder = temp;
2271
2272 __ jcc(Assembler::notEqual, L_clinit_barrier_slow);
2273 __ movptr(field_holder, Address(cache, in_bytes(ResolvedFieldEntry::field_holder_offset())));
2274 __ clinit_barrier(field_holder, &L_done, /*L_slow_path*/ nullptr);
2275 __ bind(L_clinit_barrier_slow);
2276 } else {
2277 __ jcc(Assembler::equal, L_done);
2278 }
2279
2280 // resolve first time through
2281 // Class initialization barrier slow path lands here as well.
2282 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2283 __ movl(temp, code);
2284 __ call_VM_preemptable(noreg, entry, temp);
2285 // Update registers with resolved info
2286 __ load_field_entry(cache, index);
2287 __ bind(L_done);
2288 }
2289
2290 void TemplateTable::load_resolved_field_entry(Register obj,
2291 Register cache,
2292 Register tos_state,
2293 Register offset,
2294 Register flags,
2295 bool is_static = false) {
2296 assert_different_registers(cache, tos_state, flags, offset);
2297
2298 // Field offset
2299 __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2300
2301 // Flags
2302 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2303
2304 // TOS state
2305 __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2306
2307 // Klass overwrite register
2308 if (is_static) {
2309 __ movptr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2310 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2311 __ movptr(obj, Address(obj, mirror_offset));
2312 __ resolve_oop_handle(obj, rscratch2);
2313 }
}
2316
2317 void TemplateTable::load_invokedynamic_entry(Register method) {
2318 // setup registers
2319 const Register appendix = rax;
2320 const Register cache = rcx;
2321 const Register index = rdx;
2322 assert_different_registers(method, appendix, cache, index);
2323
2324 __ save_bcp();
2325
2326 Label resolved;
2327
2328 __ load_resolved_indy_entry(cache, index);
2329 __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2330
2331 // Compare the method to zero
2332 __ testptr(method, method);
2333 __ jcc(Assembler::notZero, resolved);
2334
2335 Bytecodes::Code code = bytecode();
2336
2337 // Call to the interpreter runtime to resolve invokedynamic
2338 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2339 __ movl(method, code); // this is essentially Bytecodes::_invokedynamic
2340 __ call_VM(noreg, entry, method);
2341 // Update registers with resolved info
2342 __ load_resolved_indy_entry(cache, index);
2343 __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2344
2345 #ifdef ASSERT
2346 __ testptr(method, method);
2347 __ jcc(Assembler::notZero, resolved);
2348 __ stop("Should be resolved by now");
2349 #endif // ASSERT
2350 __ bind(resolved);
2351
2352 Label L_no_push;
2353 // Check if there is an appendix
2354 __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2355 __ testl(index, (1 << ResolvedIndyEntry::has_appendix_shift));
2356 __ jcc(Assembler::zero, L_no_push);
2357
2358 // Get appendix
2359 __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2360 // Push the appendix as a trailing parameter
2361 // since the parameter_size includes it.
2362 __ load_resolved_reference_at_index(appendix, index);
2363 __ verify_oop(appendix);
2364 __ push(appendix); // push appendix (MethodType, CallSite, etc.)
2365 __ bind(L_no_push);
2366
2367 // compute return type
2368 __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2369 // load return address
2370 {
2371 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2372 ExternalAddress table(table_addr);
2373 __ lea(rscratch1, table);
2374 __ movptr(index, Address(rscratch1, index, Address::times_ptr));
2375 }
2376
2377 // push return address
2378 __ push(index);
2379 }
2380
2381 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2382 Register method,
2383 Register flags) {
2384 // setup registers
2385 const Register index = rdx;
2386 assert_different_registers(cache, index);
2387 assert_different_registers(method, cache, flags);
2388
2389 // determine constant pool cache field offsets
2390 resolve_cache_and_index_for_method(f1_byte, cache, index);
2391 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2392 __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2393 }
2394
2395 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2396 Register method,
2397 Register ref_index,
2398 Register flags) {
2399 // setup registers
2400 const Register index = rdx;
2401 assert_different_registers(cache, index);
2402 assert_different_registers(cache, method, ref_index, flags);
2403
2404 // determine constant pool cache field offsets
2405 resolve_cache_and_index_for_method(f1_byte, cache, index);
2406 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2407
2408 // Maybe push appendix
2409 Label L_no_push;
2410 __ testl(flags, (1 << ResolvedMethodEntry::has_appendix_shift));
2411 __ jcc(Assembler::zero, L_no_push);
2412 // invokehandle uses an index into the resolved references array
2413 __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2414 // Push the appendix as a trailing parameter.
2415 // This must be done before we get the receiver,
2416 // since the parameter_size includes it.
2417 Register appendix = method;
2418 __ load_resolved_reference_at_index(appendix, ref_index);
2419 __ push(appendix); // push appendix (MethodType, CallSite, etc.)
2420 __ bind(L_no_push);
2421
2422 __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2423 }
2424
2425 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2426 Register klass,
2427 Register method_or_table_index,
2428 Register flags) {
2429 // setup registers
2430 const Register index = rdx;
2431 assert_different_registers(cache, klass, method_or_table_index, flags);
2432
2433 // determine constant pool cache field offsets
2434 resolve_cache_and_index_for_method(f1_byte, cache, index);
2435 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2436
  // Invokeinterface can behave in different ways:
  // If calling a method from java.lang.Object, the forced-virtual flag is set and
  // the invocation behaves like an invokevirtual call. The vfinal flag then
  // determines whether a Method* or a vtable index is placed in the register.
  // Otherwise, the registers are populated with the klass and the Method*.
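  // In pseudo-code (a sketch of the selection below; accessor names are
  // illustrative):
  //   if (forced_virtual) {
  //     if (is_vfinal) method_or_table_index = entry->method();
  //     else           method_or_table_index = entry->table_index(); // vtable index
  //   } else {
  //     method_or_table_index = entry->method();
  //     klass                 = entry->klass();
  //   }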
2442
2443 Label NotVirtual; Label NotVFinal; Label Done;
2444 __ testl(flags, 1 << ResolvedMethodEntry::is_forced_virtual_shift);
2445 __ jcc(Assembler::zero, NotVirtual);
2446 __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
2447 __ jcc(Assembler::zero, NotVFinal);
2448 __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2449 __ jmp(Done);
2450
2451 __ bind(NotVFinal);
2452 __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2453 __ jmp(Done);
2454
2455 __ bind(NotVirtual);
2456 __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2457 __ movptr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
2458 __ bind(Done);
2459 }
2460
2461 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
2462 Register method_or_table_index,
2463 Register flags) {
2464 // setup registers
2465 const Register index = rdx;
2466 assert_different_registers(index, cache);
2467 assert_different_registers(method_or_table_index, cache, flags);
2468
2469 // determine constant pool cache field offsets
2470 resolve_cache_and_index_for_method(f2_byte, cache, index);
2471 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2472
2473 // method_or_table_index can either be an itable index or a method depending on the virtual final flag
2474 Label isVFinal; Label Done;
2475 __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
2476 __ jcc(Assembler::notZero, isVFinal);
2477 __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2478 __ jmp(Done);
2479 __ bind(isVFinal);
2480 __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2481 __ bind(Done);
2482 }
2483
// The cache and index registers are expected to be set before this call.
2485 // Correct values of the cache and index registers are preserved.
2486 void TemplateTable::jvmti_post_field_access(Register cache,
2487 Register index,
2488 bool is_static,
2489 bool has_tos) {
2490 if (JvmtiExport::can_post_field_access()) {
2491 // Check to see if a field access watch has been set before we take
2492 // the time to call into the VM.
2493 Label L1;
2494 assert_different_registers(cache, index, rax);
2495 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2496 __ testl(rax,rax);
2497 __ jcc(Assembler::zero, L1);
2498
2499 // cache entry pointer
2500 __ load_field_entry(cache, index);
2501 if (is_static) {
2502 __ xorptr(rax, rax); // null object reference
2503 } else {
2504 __ pop(atos); // Get the object
2505 __ verify_oop(rax);
2506 __ push(atos); // Restore stack state
2507 }
    // rax: object pointer or null
2509 // cache: cache entry pointer
2510 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2511 rax, cache);
2512
2513 __ load_field_entry(cache, index);
2514 __ bind(L1);
2515 }
2516 }
2517
2518 void TemplateTable::pop_and_check_object(Register r) {
2519 __ pop_ptr(r);
2520 __ null_check(r); // for field access must check obj.
2521 __ verify_oop(r);
2522 }
2523
2524 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2525 transition(vtos, vtos);
2526
2527 const Register obj = c_rarg3;
2528 const Register cache = rcx;
2529 const Register index = rdx;
2530 const Register off = rbx;
2531 const Register tos_state = rax;
2532 const Register flags = rdx;
2533 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2534
2535 resolve_cache_and_index_for_field(byte_no, cache, index);
2536 jvmti_post_field_access(cache, index, is_static, false);
2537 load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2538
2539 if (!is_static) pop_and_check_object(obj);
2540
2541 const Address field(obj, off, Address::times_1, 0*wordSize);
2542
2543 Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
2544
  // btos == 0 (asserted below), so a plain testl suffices for the byte case
2546 assert(btos == 0, "change code, btos != 0");
2547 __ testl(tos_state, tos_state);
2548 __ jcc(Assembler::notZero, notByte);
2549
2550 // btos
2551 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg);
2552 __ push(btos);
2553 // Rewrite bytecode to be faster
2554 if (!is_static && rc == may_rewrite) {
2555 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2556 }
2557 __ jmp(Done);
2558
2559 __ bind(notByte);
2560 __ cmpl(tos_state, ztos);
2561 __ jcc(Assembler::notEqual, notBool);
2562
2563 // ztos (same code as btos)
2564 __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg);
2565 __ push(ztos);
2566 // Rewrite bytecode to be faster
2567 if (!is_static && rc == may_rewrite) {
2568 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2569 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2570 }
2571 __ jmp(Done);
2572
2573 __ bind(notBool);
2574 __ cmpl(tos_state, atos);
2575 __ jcc(Assembler::notEqual, notObj);
2576 // atos
2577 do_oop_load(_masm, field, rax);
2578 __ push(atos);
2579 if (!is_static && rc == may_rewrite) {
2580 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2581 }
2582 __ jmp(Done);
2583
2584 __ bind(notObj);
2585 __ cmpl(tos_state, itos);
2586 __ jcc(Assembler::notEqual, notInt);
2587 // itos
2588 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg);
2589 __ push(itos);
2590 // Rewrite bytecode to be faster
2591 if (!is_static && rc == may_rewrite) {
2592 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2593 }
2594 __ jmp(Done);
2595
2596 __ bind(notInt);
2597 __ cmpl(tos_state, ctos);
2598 __ jcc(Assembler::notEqual, notChar);
2599 // ctos
2600 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg);
2601 __ push(ctos);
2602 // Rewrite bytecode to be faster
2603 if (!is_static && rc == may_rewrite) {
2604 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2605 }
2606 __ jmp(Done);
2607
2608 __ bind(notChar);
2609 __ cmpl(tos_state, stos);
2610 __ jcc(Assembler::notEqual, notShort);
2611 // stos
2612 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg);
2613 __ push(stos);
2614 // Rewrite bytecode to be faster
2615 if (!is_static && rc == may_rewrite) {
2616 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2617 }
2618 __ jmp(Done);
2619
2620 __ bind(notShort);
2621 __ cmpl(tos_state, ltos);
2622 __ jcc(Assembler::notEqual, notLong);
2623 // ltos
  // Generate code as if the field were volatile. Historically (32-bit x86) there
  // just weren't enough registers to save that information, and this code is
  // faster than the test.
2626 __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg);
2627 __ push(ltos);
2628 // Rewrite bytecode to be faster
2629 if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2630 __ jmp(Done);
2631
2632 __ bind(notLong);
2633 __ cmpl(tos_state, ftos);
2634 __ jcc(Assembler::notEqual, notFloat);
2635 // ftos
2636
2637 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg);
2638 __ push(ftos);
2639 // Rewrite bytecode to be faster
2640 if (!is_static && rc == may_rewrite) {
2641 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2642 }
2643 __ jmp(Done);
2644
2645 __ bind(notFloat);
2646 #ifdef ASSERT
2647 Label notDouble;
2648 __ cmpl(tos_state, dtos);
2649 __ jcc(Assembler::notEqual, notDouble);
2650 #endif
2651 // dtos
2652 // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
2653 __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg);
2654 __ push(dtos);
2655 // Rewrite bytecode to be faster
2656 if (!is_static && rc == may_rewrite) {
2657 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2658 }
2659 #ifdef ASSERT
2660 __ jmp(Done);
2661
2662 __ bind(notDouble);
2663 __ stop("Bad state");
2664 #endif
2665
2666 __ bind(Done);
2667 // [jk] not needed currently
2668 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2669 // Assembler::LoadStore));
2670 }
2671
2672 void TemplateTable::getfield(int byte_no) {
2673 getfield_or_static(byte_no, false);
2674 }
2675
2676 void TemplateTable::nofast_getfield(int byte_no) {
2677 getfield_or_static(byte_no, false, may_not_rewrite);
2678 }
2679
2680 void TemplateTable::getstatic(int byte_no) {
2681 getfield_or_static(byte_no, true);
2682 }
2683
2684
// The cache and index registers are expected to be set before this call.
2686 // The function may destroy various registers, just not the cache and index registers.
2687 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2688 // Cache is rcx and index is rdx
2689 const Register entry = c_rarg2; // ResolvedFieldEntry
2690 const Register obj = c_rarg1; // Object pointer
2691 const Register value = c_rarg3; // JValue object
2692
2693 if (JvmtiExport::can_post_field_modification()) {
2694 // Check to see if a field modification watch has been set before
2695 // we take the time to call into the VM.
2696 Label L1;
2697 assert_different_registers(cache, obj, rax);
2698 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2699 __ testl(rax, rax);
2700 __ jcc(Assembler::zero, L1);
2701
2702 __ mov(entry, cache);
2703
2704 if (is_static) {
2705 // Life is simple. Null out the object pointer.
2706 __ xorl(obj, obj);
2707
2708 } else {
2709 // Life is harder. The stack holds the value on top, followed by
2710 // the object. We don't know the size of the value, though; it
2711 // could be one or two words depending on its type. As a result,
2712 // we must find the type to determine where the object is.
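      // Expression stack at this point, sketched with the top at rsp:
      //   one-word value:  [value][obj]...         -> obj at at_tos_p1()
      //   two-word value:  [value][value][obj]...  -> obj at at_tos_p2()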
2713 __ load_unsigned_byte(value, Address(entry, in_bytes(ResolvedFieldEntry::type_offset())));
2714 __ movptr(obj, at_tos_p1()); // initially assume a one word jvalue
2715 __ cmpl(value, ltos);
2716 __ cmovptr(Assembler::equal,
2717 obj, at_tos_p2()); // ltos (two word jvalue)
2718 __ cmpl(value, dtos);
2719 __ cmovptr(Assembler::equal,
2720 obj, at_tos_p2()); // dtos (two word jvalue)
2721 }
2722
2723 // object (tos)
2724 __ mov(value, rsp);
2725 // obj: object pointer set up above (null if static)
2726 // cache: field entry pointer
2727 // value: jvalue object on the stack
2728 __ call_VM(noreg,
2729 CAST_FROM_FN_PTR(address,
2730 InterpreterRuntime::post_field_modification),
2731 obj, entry, value);
2732 // Reload field entry
2733 __ load_field_entry(cache, index);
2734 __ bind(L1);
2735 }
2736 }
2737
2738 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2739 transition(vtos, vtos);
2740
2741 const Register obj = rcx;
2742 const Register cache = rcx;
2743 const Register index = rdx;
2744 const Register tos_state = rdx;
2745 const Register off = rbx;
2746 const Register flags = rax;
2747
2748 resolve_cache_and_index_for_field(byte_no, cache, index);
2749 jvmti_post_field_mod(cache, index, is_static);
2750 load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2751
2752 // [jk] not needed currently
2753 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2754 // Assembler::StoreStore));
2755
2756 Label notVolatile, Done;
2757
2758 // Check for volatile store
2759 __ andl(flags, (1 << ResolvedFieldEntry::is_volatile_shift));
2760 __ testl(flags, flags);
2761 __ jcc(Assembler::zero, notVolatile);
2762
2763 putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state);
2764 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2765 Assembler::StoreStore));
2766 __ jmp(Done);
2767 __ bind(notVolatile);
2768
2769 putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state);
2770
2771 __ bind(Done);
2772 }
2773
2774 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
2775 Register obj, Register off, Register tos_state) {
2776
2777 // field addresses
2778 const Address field(obj, off, Address::times_1, 0*wordSize);
2779
2780 Label notByte, notBool, notInt, notShort, notChar,
2781 notLong, notFloat, notObj;
2782 Label Done;
2783
2784 const Register bc = c_rarg3;
2785
2786 // Test TOS state
2787 __ testl(tos_state, tos_state);
2788 __ jcc(Assembler::notZero, notByte);
2789
2790 // btos
2791 {
2792 __ pop(btos);
2793 if (!is_static) pop_and_check_object(obj);
2794 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
2795 if (!is_static && rc == may_rewrite) {
2796 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2797 }
2798 __ jmp(Done);
2799 }
2800
2801 __ bind(notByte);
2802 __ cmpl(tos_state, ztos);
2803 __ jcc(Assembler::notEqual, notBool);
2804
2805 // ztos
2806 {
2807 __ pop(ztos);
2808 if (!is_static) pop_and_check_object(obj);
2809 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
2810 if (!is_static && rc == may_rewrite) {
2811 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
2812 }
2813 __ jmp(Done);
2814 }
2815
2816 __ bind(notBool);
2817 __ cmpl(tos_state, atos);
2818 __ jcc(Assembler::notEqual, notObj);
2819
2820 // atos
2821 {
2822 __ pop(atos);
2823 if (!is_static) pop_and_check_object(obj);
2824 // Store into the field
2825 do_oop_store(_masm, field, rax);
2826 if (!is_static && rc == may_rewrite) {
2827 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2828 }
2829 __ jmp(Done);
2830 }
2831
2832 __ bind(notObj);
2833 __ cmpl(tos_state, itos);
2834 __ jcc(Assembler::notEqual, notInt);
2835
2836 // itos
2837 {
2838 __ pop(itos);
2839 if (!is_static) pop_and_check_object(obj);
2840 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
2841 if (!is_static && rc == may_rewrite) {
2842 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2843 }
2844 __ jmp(Done);
2845 }
2846
2847 __ bind(notInt);
2848 __ cmpl(tos_state, ctos);
2849 __ jcc(Assembler::notEqual, notChar);
2850
2851 // ctos
2852 {
2853 __ pop(ctos);
2854 if (!is_static) pop_and_check_object(obj);
2855 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
2856 if (!is_static && rc == may_rewrite) {
2857 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2858 }
2859 __ jmp(Done);
2860 }
2861
2862 __ bind(notChar);
2863 __ cmpl(tos_state, stos);
2864 __ jcc(Assembler::notEqual, notShort);
2865
2866 // stos
2867 {
2868 __ pop(stos);
2869 if (!is_static) pop_and_check_object(obj);
2870 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
2871 if (!is_static && rc == may_rewrite) {
2872 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2873 }
2874 __ jmp(Done);
2875 }
2876
2877 __ bind(notShort);
2878 __ cmpl(tos_state, ltos);
2879 __ jcc(Assembler::notEqual, notLong);
2880
2881 // ltos
2882 {
2883 __ pop(ltos);
2884 if (!is_static) pop_and_check_object(obj);
2885 // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
    __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos */, noreg, noreg, noreg);
2887 if (!is_static && rc == may_rewrite) {
2888 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2889 }
2890 __ jmp(Done);
2891 }
2892
2893 __ bind(notLong);
2894 __ cmpl(tos_state, ftos);
2895 __ jcc(Assembler::notEqual, notFloat);
2896
2897 // ftos
2898 {
2899 __ pop(ftos);
2900 if (!is_static) pop_and_check_object(obj);
2901 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
2902 if (!is_static && rc == may_rewrite) {
2903 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2904 }
2905 __ jmp(Done);
2906 }
2907
2908 __ bind(notFloat);
2909 #ifdef ASSERT
2910 Label notDouble;
2911 __ cmpl(tos_state, dtos);
2912 __ jcc(Assembler::notEqual, notDouble);
2913 #endif
2914
2915 // dtos
2916 {
2917 __ pop(dtos);
2918 if (!is_static) pop_and_check_object(obj);
2919 // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
2920 __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg, noreg);
2921 if (!is_static && rc == may_rewrite) {
2922 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2923 }
2924 }
2925
2926 #ifdef ASSERT
2927 __ jmp(Done);
2928
2929 __ bind(notDouble);
2930 __ stop("Bad state");
2931 #endif
2932
2933 __ bind(Done);
2934 }
2935
2936 void TemplateTable::putfield(int byte_no) {
2937 putfield_or_static(byte_no, false);
2938 }
2939
2940 void TemplateTable::nofast_putfield(int byte_no) {
2941 putfield_or_static(byte_no, false, may_not_rewrite);
2942 }
2943
2944 void TemplateTable::putstatic(int byte_no) {
2945 putfield_or_static(byte_no, true);
2946 }
2947
2948 void TemplateTable::jvmti_post_fast_field_mod() {
2949
2950 const Register scratch = c_rarg3;
2951
2952 if (JvmtiExport::can_post_field_modification()) {
2953 // Check to see if a field modification watch has been set before
2954 // we take the time to call into the VM.
2955 Label L2;
2956 __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2957 __ testl(scratch, scratch);
2958 __ jcc(Assembler::zero, L2);
2959 __ pop_ptr(rbx); // copy the object pointer from tos
2960 __ verify_oop(rbx);
2961 __ push_ptr(rbx); // put the object pointer back on tos
2962 // Save tos values before call_VM() clobbers them. Since we have
2963 // to do it for every data type, we use the saved values as the
2964 // jvalue object.
2965 switch (bytecode()) { // load values into the jvalue object
2966 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2967 case Bytecodes::_fast_bputfield: // fall through
2968 case Bytecodes::_fast_zputfield: // fall through
2969 case Bytecodes::_fast_sputfield: // fall through
2970 case Bytecodes::_fast_cputfield: // fall through
2971 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2972 case Bytecodes::_fast_dputfield: __ push(dtos); break;
2973 case Bytecodes::_fast_fputfield: __ push(ftos); break;
2974 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2975
2976 default:
2977 ShouldNotReachHere();
2978 }
2979 __ mov(scratch, rsp); // points to jvalue on the stack
2980 // access constant pool cache entry
2981 __ load_field_entry(c_rarg2, rax);
2982 __ verify_oop(rbx);
2983 // rbx: object pointer copied above
2984 // c_rarg2: cache entry pointer
2985 // c_rarg3: jvalue object on the stack
2986 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3);
2987
2988 switch (bytecode()) { // restore tos values
2989 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2990 case Bytecodes::_fast_bputfield: // fall through
2991 case Bytecodes::_fast_zputfield: // fall through
2992 case Bytecodes::_fast_sputfield: // fall through
2993 case Bytecodes::_fast_cputfield: // fall through
2994 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2995 case Bytecodes::_fast_dputfield: __ pop(dtos); break;
2996 case Bytecodes::_fast_fputfield: __ pop(ftos); break;
2997 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2998 default: break;
2999 }
3000 __ bind(L2);
3001 }
3002 }
3003
3004 void TemplateTable::fast_storefield(TosState state) {
3005 transition(state, vtos);
3006
3007 Register cache = rcx;
3008
3009 Label notVolatile, Done;
3010
3011 jvmti_post_fast_field_mod();
3012
3013 __ push(rax);
3014 __ load_field_entry(rcx, rax);
3015 load_resolved_field_entry(noreg, cache, rax, rbx, rdx);
  // rbx: field offset, rax: TOS value, rdx: flags
3017 __ andl(rdx, (1 << ResolvedFieldEntry::is_volatile_shift));
3018 __ pop(rax);
3019
3020 // Get object from stack
3021 pop_and_check_object(rcx);
3022
3023 // field address
3024 const Address field(rcx, rbx, Address::times_1);
3025
3026 // Check for volatile store
3027 __ testl(rdx, rdx);
3028 __ jcc(Assembler::zero, notVolatile);
3029
3030 fast_storefield_helper(field, rax);
3031 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3032 Assembler::StoreStore));
3033 __ jmp(Done);
3034 __ bind(notVolatile);
3035
3036 fast_storefield_helper(field, rax);
3037
3038 __ bind(Done);
3039 }
3040
3041 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3042
3043 // access field
3044 switch (bytecode()) {
3045 case Bytecodes::_fast_aputfield:
3046 do_oop_store(_masm, field, rax);
3047 break;
3048 case Bytecodes::_fast_lputfield:
3049 __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg, noreg);
3050 break;
3051 case Bytecodes::_fast_iputfield:
3052 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3053 break;
3054 case Bytecodes::_fast_zputfield:
3055 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3056 break;
3057 case Bytecodes::_fast_bputfield:
3058 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3059 break;
3060 case Bytecodes::_fast_sputfield:
3061 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3062 break;
3063 case Bytecodes::_fast_cputfield:
3064 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3065 break;
3066 case Bytecodes::_fast_fputfield:
      __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3068 break;
3069 case Bytecodes::_fast_dputfield:
      __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3071 break;
3072 default:
3073 ShouldNotReachHere();
3074 }
3075 }
3076
3077 void TemplateTable::fast_accessfield(TosState state) {
3078 transition(atos, state);
3079
3080 // Do the JVMTI work here to avoid disturbing the register state below
3081 if (JvmtiExport::can_post_field_access()) {
3082 // Check to see if a field access watch has been set before we
3083 // take the time to call into the VM.
3084 Label L1;
3085 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3086 __ testl(rcx, rcx);
3087 __ jcc(Assembler::zero, L1);
3088 // access constant pool cache entry
3089 __ load_field_entry(c_rarg2, rcx);
3090 __ verify_oop(rax);
3091 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
3092 __ mov(c_rarg1, rax);
3093 // c_rarg1: object pointer copied above
3094 // c_rarg2: cache entry pointer
3095 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2);
3096 __ pop_ptr(rax); // restore object pointer
3097 __ bind(L1);
3098 }
3099
3100 // access constant pool cache
3101 __ load_field_entry(rcx, rbx);
3102 __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3103
3104 // rax: object
3105 __ verify_oop(rax);
3106 __ null_check(rax);
3107 Address field(rax, rbx, Address::times_1);
3108
3109 // access field
3110 switch (bytecode()) {
3111 case Bytecodes::_fast_agetfield:
3112 do_oop_load(_masm, field, rax);
3113 __ verify_oop(rax);
3114 break;
3115 case Bytecodes::_fast_lgetfield:
3116 __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg);
3117 break;
3118 case Bytecodes::_fast_igetfield:
3119 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg);
3120 break;
3121 case Bytecodes::_fast_bgetfield:
3122 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg);
3123 break;
3124 case Bytecodes::_fast_sgetfield:
3125 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg);
3126 break;
3127 case Bytecodes::_fast_cgetfield:
3128 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg);
3129 break;
3130 case Bytecodes::_fast_fgetfield:
3131 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg);
3132 break;
3133 case Bytecodes::_fast_dgetfield:
3134 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg);
3135 break;
3136 default:
3137 ShouldNotReachHere();
3138 }
3139 // [jk] not needed currently
3140 // Label notVolatile;
3141 // __ testl(rdx, rdx);
3142 // __ jcc(Assembler::zero, notVolatile);
3143 // __ membar(Assembler::LoadLoad);
3144 // __ bind(notVolatile);
3145 }
3146
3147 void TemplateTable::fast_xaccess(TosState state) {
3148 transition(vtos, state);
3149
3150 // get receiver
3151 __ movptr(rax, aaddress(0));
3152 // access constant pool cache
3153 __ load_field_entry(rcx, rdx, 2);
3154 __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3155
3156 // make sure exception is reported in correct bcp range (getfield is
3157 // next instruction)
3158 __ increment(rbcp);
3159 __ null_check(rax);
3160 const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3161 switch (state) {
3162 case itos:
3163 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg);
3164 break;
3165 case atos:
3166 do_oop_load(_masm, field, rax);
3167 __ verify_oop(rax);
3168 break;
3169 case ftos:
3170 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg);
3171 break;
3172 default:
3173 ShouldNotReachHere();
3174 }
3175
3176 // [jk] not needed currently
3177 // Label notVolatile;
3178 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3179 // in_bytes(ConstantPoolCache::base_offset() +
3180 // ConstantPoolCacheEntry::flags_offset())));
3181 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3182 // __ testl(rdx, 0x1);
3183 // __ jcc(Assembler::zero, notVolatile);
3184 // __ membar(Assembler::LoadLoad);
3185 // __ bind(notVolatile);
3186
3187 __ decrement(rbcp);
3188 }
3189
3190 //-----------------------------------------------------------------------------
3191 // Calls
3192
3193 void TemplateTable::prepare_invoke(Register cache, Register recv, Register flags) {
3194 // determine flags
3195 const Bytecodes::Code code = bytecode();
3196 const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
3197 assert_different_registers(recv, flags);
3198
3199 // save 'interpreter return address'
3200 __ save_bcp();
3201
  // Stash flags in rbcp (scratch here) and load the return-type TOS state
3203 __ movl(rbcp, flags);
3204 __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
3205
3206 // load receiver if needed (after appendix is pushed so parameter size is correct)
3207 // Note: no return address pushed yet
3208 if (load_receiver) {
3209 __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
3210 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3211 const int receiver_is_at_end = -1; // back off one slot to get receiver
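    // Roughly: with N parameter slots (receiver included) the receiver sits
    // N-1 slots below the expression-stack top; the two -1 corrections account
    // for the not-yet-pushed return address and for indexing from the far end.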
3212 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3213 __ movptr(recv, recv_addr);
3214 __ verify_oop(recv);
3215 }
3216
3217 // load return address
3218 {
3219 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3220 ExternalAddress table(table_addr);
3221 __ lea(rscratch1, table);
3222 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3223 }
3224
3225 // push return address
3226 __ push(flags);
3227
  // Restore the flags value from rbcp, then restore rbcp itself
  // (r13, the bytecode pointer) from the interpreter frame.
3230 __ movl(flags, rbcp);
3231 __ restore_bcp();
3232 }
3233
3234 void TemplateTable::invokevirtual_helper(Register index,
3235 Register recv,
3236 Register flags) {
3237 // Uses temporary registers rax, rdx
3238 assert_different_registers(index, recv, rax, rdx);
3239 assert(index == rbx, "");
3240 assert(recv == rcx, "");
3241
3242 // Test for an invoke of a final method
3243 Label notFinal;
3244 __ movl(rax, flags);
3245 __ andl(rax, (1 << ResolvedMethodEntry::is_vfinal_shift));
3246 __ jcc(Assembler::zero, notFinal);
3247
3248 const Register method = index; // method must be rbx
3249 assert(method == rbx,
3250 "Method* must be rbx for interpreter calling convention");
3251
3252 // do the call - the index is actually the method to call
3253 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3254
3255 // It's final, need a null check here!
3256 __ null_check(recv);
3257
3258 // profile this call
3259 __ profile_final_call(rax);
3260 __ profile_arguments_type(rax, method, rbcp, true);
3261
3262 __ jump_from_interpreted(method, rax);
3263
3264 __ bind(notFinal);
3265
3266 // get receiver klass
3267 __ load_klass(rax, recv, rscratch1);
3268
3269 // profile this call
3270 __ profile_virtual_call(rax, rlocals);
3271 // get target Method* & entry point
3272 __ lookup_virtual_method(rax, index, method);
3273
3274 __ profile_arguments_type(rdx, method, rbcp, true);
3275 __ jump_from_interpreted(method, rdx);
3276 }
3277
3278 void TemplateTable::invokevirtual(int byte_no) {
3279 transition(vtos, vtos);
3280 assert(byte_no == f2_byte, "use this argument");
3281
3282 load_resolved_method_entry_virtual(rcx, // ResolvedMethodEntry*
3283 rbx, // Method or itable index
3284 rdx); // Flags
3285 prepare_invoke(rcx, // ResolvedMethodEntry*
3286 rcx, // Receiver
3287 rdx); // flags
3288
3289 // rbx: index
3290 // rcx: receiver
3291 // rdx: flags
3292 invokevirtual_helper(rbx, rcx, rdx);
3293 }
3294
3295 void TemplateTable::invokespecial(int byte_no) {
3296 transition(vtos, vtos);
3297 assert(byte_no == f1_byte, "use this argument");
3298
3299 load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
3300 rbx, // Method*
3301 rdx); // flags
3302 prepare_invoke(rcx,
3303 rcx, // get receiver also for null check
3304 rdx); // flags
3305
3306 __ verify_oop(rcx);
3307 __ null_check(rcx);
3308 // do the call
3309 __ profile_call(rax);
3310 __ profile_arguments_type(rax, rbx, rbcp, false);
3311 __ jump_from_interpreted(rbx, rax);
3312 }
3313
3314 void TemplateTable::invokestatic(int byte_no) {
3315 transition(vtos, vtos);
3316 assert(byte_no == f1_byte, "use this argument");
3317
3318 load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
3319 rbx, // Method*
3320 rdx // flags
3321 );
3322 prepare_invoke(rcx, rcx, rdx); // cache and flags
3323
3324 // do the call
3325 __ profile_call(rax);
3326 __ profile_arguments_type(rax, rbx, rbcp, false);
3327 __ jump_from_interpreted(rbx, rax);
3328 }
3329
3330
3331 void TemplateTable::fast_invokevfinal(int byte_no) {
3332 transition(vtos, vtos);
3333 assert(byte_no == f2_byte, "use this argument");
3334 __ stop("fast_invokevfinal not used on x86");
3335 }
3336
3337
3338 void TemplateTable::invokeinterface(int byte_no) {
3339 transition(vtos, vtos);
3340 assert(byte_no == f1_byte, "use this argument");
3341
3342 load_resolved_method_entry_interface(rcx, // ResolvedMethodEntry*
3343 rax, // Klass*
3344 rbx, // Method* or itable/vtable index
3345 rdx); // flags
3346 prepare_invoke(rcx, rcx, rdx); // receiver, flags
3347
3348 // First check for Object case, then private interface method,
3349 // then regular interface method.
3350
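  // Dispatch selection, in outline:
  //   forced_virtual (java.lang.Object method) -> invokevirtual-style dispatch
  //   is_vfinal (private interface method)     -> direct call after a
  //                                               receiver subtype check
  //   otherwise                                -> itable lookup via the declaring
  //                                               interface (may throw ICCE/AME)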
3351 // Special case of invokeinterface called for virtual method of
3352 // java.lang.Object. See cpCache.cpp for details.
3353 Label notObjectMethod;
3354 __ movl(rlocals, rdx);
3355 __ andl(rlocals, (1 << ResolvedMethodEntry::is_forced_virtual_shift));
3356 __ jcc(Assembler::zero, notObjectMethod);
3357
3358 invokevirtual_helper(rbx, rcx, rdx);
3359 // no return from above
3360 __ bind(notObjectMethod);
3361
3362 Label no_such_interface; // for receiver subtype check
3363 Register recvKlass; // used for exception processing
3364
3365 // Check for private method invocation - indicated by vfinal
3366 Label notVFinal;
3367 __ movl(rlocals, rdx);
3368 __ andl(rlocals, (1 << ResolvedMethodEntry::is_vfinal_shift));
3369 __ jcc(Assembler::zero, notVFinal);
3370
3371 // Get receiver klass into rlocals - also a null check
3372 __ load_klass(rlocals, rcx, rscratch1);
3373
3374 Label subtype;
3375 __ check_klass_subtype(rlocals, rax, rbcp, subtype);
3376 // If we get here the typecheck failed
3377 recvKlass = rdx;
3378 __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
3379 __ jmp(no_such_interface);
3380
3381 __ bind(subtype);
3382
3383 // do the call - rbx is actually the method to call
3384
3385 __ profile_final_call(rdx);
3386 __ profile_arguments_type(rdx, rbx, rbcp, true);
3387
3388 __ jump_from_interpreted(rbx, rdx);
3389 // no return from above
3390 __ bind(notVFinal);
3391
3392 // Get receiver klass into rdx - also a null check
3393 __ restore_locals(); // restore r14
3394 __ load_klass(rdx, rcx, rscratch1);
3395
3396 Label no_such_method;
3397
3398 // Preserve method for throw_AbstractMethodErrorVerbose.
3399 __ mov(rcx, rbx);
3400 // Receiver subtype check against REFC.
3401 // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3402 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3403 rdx, rax, noreg,
3404 // outputs: scan temp. reg, scan temp. reg
3405 rbcp, rlocals,
3406 no_such_interface,
3407 /*return_method=*/false);
3408
3409 // profile this call
3410 __ restore_bcp(); // rbcp was destroyed by receiver type check
3411 __ profile_virtual_call(rdx, rbcp);
3412
3413 // Get declaring interface class from method, and itable index
3414 __ load_method_holder(rax, rbx);
3415 __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3416 __ subl(rbx, Method::itable_index_max);
3417 __ negl(rbx);
3418
3419 // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3420 __ mov(rlocals, rdx);
3421 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3422 rlocals, rax, rbx,
3423 // outputs: method, scan temp. reg
3424 rbx, rbcp,
3425 no_such_interface);
3426
3427 // rbx: Method* to call
3428 // rcx: receiver
3429 // Check for abstract method error
3430 // Note: This should be done more efficiently via a throw_abstract_method_error
3431 // interpreter entry point and a conditional jump to it in case of a null
3432 // method.
3433 __ testptr(rbx, rbx);
3434 __ jcc(Assembler::zero, no_such_method);
3435
3436 __ profile_arguments_type(rdx, rbx, rbcp, true);
3437
3438 // do the call
3439 // rcx: receiver
  // rbx: Method*
3441 __ jump_from_interpreted(rbx, rdx);
3442 __ should_not_reach_here();
3443
3444 // exception handling code follows...
3445 // note: must restore interpreter registers to canonical
3446 // state for exception handling to work correctly!
3447
3448 __ bind(no_such_method);
3449 // throw exception
3450 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3451 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3452 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3453 // Pass arguments for generating a verbose error message.
3454 recvKlass = c_rarg1;
3455 Register method = c_rarg2;
3456 if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
3457 if (method != rcx) { __ movq(method, rcx); }
3458 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3459 recvKlass, method);
3460 // The call_VM checks for exception, so we should never return here.
3461 __ should_not_reach_here();
3462
3463 __ bind(no_such_interface);
3464 // throw exception
3465 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3466 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3467 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3468 // Pass arguments for generating a verbose error message.
3469 if (recvKlass != rdx) {
3470 __ movq(recvKlass, rdx);
3471 }
3472 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3473 recvKlass, rax);
3474 // the call_VM checks for exception, so we should never return here.
3475 __ should_not_reach_here();
3476 }
3477
3478 void TemplateTable::invokehandle(int byte_no) {
3479 transition(vtos, vtos);
3480 assert(byte_no == f1_byte, "use this argument");
3481 const Register rbx_method = rbx;
3482 const Register rax_mtype = rax;
3483 const Register rcx_recv = rcx;
3484 const Register rdx_flags = rdx;
3485
3486 load_resolved_method_entry_handle(rcx, rbx_method, rax_mtype, rdx_flags);
3487 prepare_invoke(rcx, rcx_recv, rdx_flags);
3488
3489 __ verify_method_ptr(rbx_method);
3490 __ verify_oop(rcx_recv);
3491 __ null_check(rcx_recv);
3492
3493 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3494 // rbx: MH.invokeExact_MT method
3495
3496 // Note: rax_mtype is already pushed (if necessary)
3497
3498 // FIXME: profile the LambdaForm also
3499 __ profile_final_call(rax);
3500 __ profile_arguments_type(rdx, rbx_method, rbcp, true);
3501
3502 __ jump_from_interpreted(rbx_method, rdx);
3503 }
3504
3505 void TemplateTable::invokedynamic(int byte_no) {
3506 transition(vtos, vtos);
3507 assert(byte_no == f1_byte, "use this argument");
3508
3509 const Register rbx_method = rbx;
3510 const Register rax_callsite = rax;
3511
3512 load_invokedynamic_entry(rbx_method);
3513 // rax: CallSite object (from cpool->resolved_references[])
3514 // rbx: MH.linkToCallSite method
3515
3516 // Note: rax_callsite is already pushed
3517
3518 // %%% should make a type profile for any invokedynamic that takes a ref argument
3519 // profile this call
3520 __ profile_call(rbcp);
3521 __ profile_arguments_type(rdx, rbx_method, rbcp, false);
3522
3523 __ verify_oop(rax_callsite);
3524
3525 __ jump_from_interpreted(rbx_method, rdx);
3526 }
3527
3528 //-----------------------------------------------------------------------------
3529 // Allocation
3530
3531 void TemplateTable::_new() {
3532 transition(vtos, atos);
3533 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3534 Label slow_case;
3535 Label slow_case_no_pop;
3536 Label done;
3537 Label initialize_header;
3538
3539 __ get_cpool_and_tags(rcx, rax);
3540
3541 // Make sure the class we're about to instantiate has been resolved.
3542 // This is done before loading the InstanceKlass to be consistent with the
3543 // order in which the constant pool is updated (see ConstantPool::klass_at_put).
3544 const int tags_offset = Array<u1>::base_offset_in_bytes();
3545 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3546 __ jcc(Assembler::notEqual, slow_case_no_pop);
3547
3548 // get InstanceKlass
3549 __ load_resolved_klass_at_index(rcx, rcx, rdx);
3550 __ push(rcx); // save the klass for initializing the header
3551
3552 // make sure klass is initialized
3553 // init_state needs acquire, but x86 is TSO, and so we are already good.
3554 assert(VM_Version::supports_fast_class_init_checks(), "must support fast class initialization checks");
3555 __ clinit_barrier(rcx, nullptr /*L_fast_path*/, &slow_case);
3556
3557 // get instance_size in InstanceKlass (scaled to a count of bytes)
3558 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3559 // test to see if it is malformed in some way
3560 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3561 __ jcc(Assembler::notZero, slow_case);
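// (For instance klasses the layout helper holds the instance size in bytes;
// sizes are word-aligned, so the low-order _lh_instance_slow_path_bit is
// free to flag classes -- e.g. those requiring finalization -- whose
// allocation must take the slow path.)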
3562
3563 // Allocate the instance:
3564 //  If TLAB is enabled:
3565 //    Try to allocate in the TLAB.
3566 //    If that fails, go to the slow path.
3567 //    Initialize the allocation.
3568 //    Exit.
3569 //  Else:
3570 //    Go to the slow path.
3571
3572 if (UseTLAB) {
3573 __ tlab_allocate(rax, rdx, 0, rcx, rbx, slow_case);
3574 if (ZeroTLAB) {
3575 // the fields have been already cleared
3576 __ jmp(initialize_header);
3577 }
3578
3579 // The object is initialized before the header. If the object size is
3580 // zero, go directly to the header initialization.
3581 if (UseCompactObjectHeaders) {
3582 assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
3583 __ decrement(rdx, oopDesc::base_offset_in_bytes());
3584 } else {
3585 __ decrement(rdx, sizeof(oopDesc));
3586 }
3587 __ jcc(Assembler::zero, initialize_header);
3588
3589 // Prepare to zero the object fields: clear rcx as the zero source and
3590 // convert the remaining size in rdx from bytes to longwords.
3591 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3592 __ shrl(rdx, LogBytesPerLong); // divide by BytesPerLong (8); carry flag set if odd
3593
3594 // rdx must have been a multiple of 8
3595 #ifdef ASSERT
3597 Label L;
3598 // Ignore partial flag stall after shrl() since it is debug VM
3599 __ jcc(Assembler::carryClear, L);
3600 __ stop("object size is not multiple of 2 - adjust this code");
3601 __ bind(L);
3602 // rdx must be > 0, no extra check needed here
3603 #endif
3604
3605 // initialize remaining object fields: rdx was a multiple of 8
3606 { Label loop;
3607 __ bind(loop);
3608 int header_size_bytes = oopDesc::header_size() * HeapWordSize;
3609 assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
3610 __ movptr(Address(rax, rdx, Address::times_8, header_size_bytes - 1*oopSize), rcx);
3611 __ decrement(rdx);
3612 __ jcc(Assembler::notZero, loop);
3613 }
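// A rough C sketch of the loop above, with hdr = header size in bytes and
// rdx counting remaining longwords:
//   for (i = rdx; i != 0; i--)
//     *(intptr_t*)(obj + hdr + (i - 1) * 8) = 0;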
3614
3615 // initialize object header only.
3616 __ bind(initialize_header);
3617 if (UseCompactObjectHeaders) {
3618 __ pop(rcx); // get saved klass back in the register.
3619 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
3620 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
3621 } else {
3622 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3623 (intptr_t)markWord::prototype().value()); // header
3624 __ pop(rcx); // get saved klass back in the register.
3625 __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
3626 __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops
3627 __ store_klass(rax, rcx, rscratch1); // klass
3628 }
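// (Sketch of the resulting layout in the non-compact case, assuming
// compressed class pointers: [mark word, 8 bytes][narrow klass, 4][gap, 4];
// the 4-byte gap is what store_klass_gap zeroes above.)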
3629
3630 if (DTraceAllocProbes) {
3631 // Trigger dtrace event for fastpath
3632 __ push(atos);
3633 __ call_VM_leaf(
3634 CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), rax);
3635 __ pop(atos);
3636 }
3637
3638 __ jmp(done);
3639 }
3640
3641 // slow case
3642 __ bind(slow_case);
3643 __ pop(rcx); // restore stack pointer to what it was when we came in.
3644 __ bind(slow_case_no_pop);
3645
3646 __ get_constant_pool(c_rarg1);
3647 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3648 __ call_VM_preemptable(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3649 __ verify_oop(rax);
3650
3651 // continue
3652 __ bind(done);
3653 }
3654
3655 void TemplateTable::newarray() {
3656 transition(itos, atos);
3657 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3658 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3659 c_rarg1, rax);
3660 }
3661
3662 void TemplateTable::anewarray() {
3663 transition(itos, atos);
3664
3665 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3666 __ get_constant_pool(c_rarg1);
3667 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3668 c_rarg1, c_rarg2, rax);
3669 }
3670
3671 void TemplateTable::arraylength() {
3672 transition(atos, itos);
3673 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3674 }
3675
3676 void TemplateTable::checkcast() {
3677 transition(atos, atos);
3678 Label done, is_null, ok_is_subtype, quicked, resolved;
3679 __ testptr(rax, rax); // object is in rax
3680 __ jcc(Assembler::zero, is_null);
3681
3682 // Get cpool & tags index
3683 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3684 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3685 // See if bytecode has already been quicked
3686 __ cmpb(Address(rdx, rbx,
3687 Address::times_1,
3688 Array<u1>::base_offset_in_bytes()),
3689 JVM_CONSTANT_Class);
3690 __ jcc(Assembler::equal, quicked);
3691 __ push(atos); // save receiver for result, and for GC
3692 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3693
3694 __ get_vm_result_metadata(rax);
3695
3696 __ pop_ptr(rdx); // restore receiver
3697 __ jmpb(resolved);
3698
3699 // Get superklass in rax and subklass in rbx
3700 __ bind(quicked);
3701 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3702 __ load_resolved_klass_at_index(rax, rcx, rbx);
3703
3704 __ bind(resolved);
3705 __ load_klass(rbx, rdx, rscratch1);
3706
3707 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3708 // Superklass in rax. Subklass in rbx.
3709 __ gen_subtype_check(rbx, ok_is_subtype);
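// (gen_subtype_check branches to ok_is_subtype on success and falls
// through on failure, so the failure path follows immediately below.)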
3710
3711 // Come here on failure
3712 __ push_ptr(rdx);
3713 // object is at TOS
3714 __ jump(RuntimeAddress(Interpreter::_throw_ClassCastException_entry));
3715
3716 // Come here on success
3717 __ bind(ok_is_subtype);
3718 __ mov(rax, rdx); // Restore object from rdx into rax
3719
3720 // Collect counts on whether this check-cast sees nulls a lot or not.
3721 if (ProfileInterpreter) {
3722 __ jmp(done);
3723 __ bind(is_null);
3724 __ profile_null_seen(rcx);
3725 } else {
3726 __ bind(is_null); // same as 'done'
3727 }
3728 __ bind(done);
3729 }
3730
3731 void TemplateTable::instanceof() {
3732 transition(atos, itos);
3733 Label done, is_null, ok_is_subtype, quicked, resolved;
3734 __ testptr(rax, rax);
3735 __ jcc(Assembler::zero, is_null);
3736
3737 // Get cpool & tags index
3738 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3739 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3740 // See if bytecode has already been quicked
3741 __ cmpb(Address(rdx, rbx,
3742 Address::times_1,
3743 Array<u1>::base_offset_in_bytes()),
3744 JVM_CONSTANT_Class);
3745 __ jcc(Assembler::equal, quicked);
3746
3747 __ push(atos); // save receiver for result, and for GC
3748 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3749
3750 __ get_vm_result_metadata(rax);
3751
3752 __ pop_ptr(rdx); // restore receiver
3753 __ verify_oop(rdx);
3754 __ load_klass(rdx, rdx, rscratch1);
3755 __ jmpb(resolved);
3756
3757 // Get superklass in rax and subklass in rdx
3758 __ bind(quicked);
3759 __ load_klass(rdx, rax, rscratch1);
3760 __ load_resolved_klass_at_index(rax, rcx, rbx);
3761
3762 __ bind(resolved);
3763
3764 // Generate subtype check. Blows rcx, rdi
3765 // Superklass in rax. Subklass in rdx.
3766 __ gen_subtype_check(rdx, ok_is_subtype);
3767
3768 // Come here on failure
3769 __ xorl(rax, rax);
3770 __ jmpb(done);
3771 // Come here on success
3772 __ bind(ok_is_subtype);
3773 __ movl(rax, 1);
3774
3775 // Collect counts on whether this test sees nulls a lot or not.
3776 if (ProfileInterpreter) {
3777 __ jmp(done);
3778 __ bind(is_null);
3779 __ profile_null_seen(rcx);
3780 } else {
3781 __ bind(is_null); // same as 'done'
3782 }
3783 __ bind(done);
3784 // rax = 0: obj == nullptr or obj is not an instanceof the specified klass
3785 // rax = 1: obj != nullptr and obj is an instanceof the specified klass
3786 }
3787
3788
3789 //----------------------------------------------------------------------------------------------------
3790 // Breakpoints
3791 void TemplateTable::_breakpoint() {
3792 // Note: We get here even if we are single stepping...
3793 // jbug insists on setting breakpoints at every bytecode
3794 // even if we are in single step mode.
3795
3796 transition(vtos, vtos);
3797
3798 // get the unpatched byte code
3799 __ get_method(c_rarg1);
3800 __ call_VM(noreg,
3801 CAST_FROM_FN_PTR(address,
3802 InterpreterRuntime::get_original_bytecode_at),
3803 c_rarg1, rbcp);
3804 __ mov(rbx, rax); // stash the original bytecode in rbx: dispatch_only_normal below dispatches on rbx, which survives the call_VM
3805
3806 // post the breakpoint event
3807 __ get_method(c_rarg1);
3808 __ call_VM(noreg,
3809 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3810 c_rarg1, rbcp);
3811
3812 // complete the execution of original bytecode
3813 __ dispatch_only_normal(vtos);
3814 }
3815
3816 //-----------------------------------------------------------------------------
3817 // Exceptions
3818
3819 void TemplateTable::athrow() {
3820 transition(atos, vtos);
3821 __ null_check(rax);
3822 __ jump(RuntimeAddress(Interpreter::throw_exception_entry()));
3823 }
3824
3825 //-----------------------------------------------------------------------------
3826 // Synchronization
3827 //
3828 // Note: monitorenter & exit are symmetric routines, which is reflected
3829 // in the assembly code structure as well
3830 //
3831 // Stack layout:
3832 //
3833 // [expressions ] <--- rsp = expression stack top
3834 // ..
3835 // [expressions ]
3836 // [monitor entry] <--- monitor block top = expression stack bot
3837 // ..
3838 // [monitor entry]
3839 // [frame data ] <--- monitor block bot
3840 // ...
3841 // [saved rbp ] <--- rbp
3842 void TemplateTable::monitorenter() {
3843 transition(atos, vtos);
3844
3845 // check for null object
3846 __ null_check(rax);
3847
3848 const Address monitor_block_top(
3849 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3850 const Address monitor_block_bot(
3851 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3852 const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
3853
3854 Label allocated;
3855
3856 Register rtop = c_rarg3;
3857 Register rbot = c_rarg2;
3858 Register rmon = c_rarg1;
3859
3860 // initialize entry pointer
3861 __ xorl(rmon, rmon); // points to free slot or null
3862
3863 // find a free slot in the monitor block (result in rmon)
3864 {
3865 Label entry, loop, exit;
3866 __ movptr(rtop, monitor_block_top); // derelativize pointer
3867 __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
3868 // rtop points to current entry, starting with top-most entry
3869
3870 __ lea(rbot, monitor_block_bot); // points to word before bottom
3871 // of monitor block
3872 __ jmpb(entry);
3873
3874 __ bind(loop);
3875 // check if current entry is used
3876 __ cmpptr(Address(rtop, BasicObjectLock::obj_offset()), NULL_WORD);
3877 // if not used then remember entry in rmon
3878 __ cmovptr(Assembler::equal, rmon, rtop);
3879 // check if current entry is for same object
3880 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
3881 // if same object then stop searching
3882 __ jccb(Assembler::equal, exit);
3883 // otherwise advance to next entry
3884 __ addptr(rtop, entry_size);
3885 __ bind(entry);
3886 // check if bottom reached
3887 __ cmpptr(rtop, rbot);
3888 // if not at bottom then check this entry
3889 __ jcc(Assembler::notEqual, loop);
3890 __ bind(exit);
3891 }
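// In effect, the scan above performs (a sketch):
//   for (e = top; e != bot; e++) {
//     if (e->obj == nullptr) free = e;  // remember a free slot
//     if (e->obj == receiver) break;    // same object: reuse its slot
//   }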
3892
3893 __ testptr(rmon, rmon); // check if a slot has been found
3894 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3895
3896 // allocate one if there's no free slot
3897 {
3898 Label entry, loop;
3899 // 1. compute new pointers // rsp: old expression stack top
3900 __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
3901 __ lea(rmon, Address(rbp, rmon, Address::times_ptr));
3902 __ subptr(rsp, entry_size); // move expression stack top
3903 __ subptr(rmon, entry_size); // move expression stack bottom
3904 __ mov(rtop, rsp); // set start value for copy loop
3905 __ subptr(monitor_block_bot, entry_size / wordSize); // set new monitor block bottom
3906 __ jmp(entry);
3907 // 2. move expression stack contents
3908 __ bind(loop);
3909 __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
3910 // word from old location
3911 __ movptr(Address(rtop, 0), rbot); // and store it at new location
3912 __ addptr(rtop, wordSize); // advance to next word
3913 __ bind(entry);
3914 __ cmpptr(rtop, rmon); // check if bottom reached
3915 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3916 // copy next word
3917 }
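// Net effect of the block above (a sketch): open a new entry by sliding the
// expression stack down one monitor slot:
//   rsp -= entry_size;
//   memmove(rsp, rsp + entry_size, size_of_expression_stack);
//   monitor_block_bot -= entry_size;  // rmon now points at the new entry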
3918
3919 // call run-time routine
3920 // rmon: points to monitor entry
3921 __ bind(allocated);
3922
3923 // Increment bcp to point to the next bytecode, so exception
3924 // handling for async. exceptions works correctly.
3925 // The object has already been popped from the stack, so the
3926 // expression stack looks correct.
3927 __ increment(rbcp);
3928
3929 // store object
3930 __ movptr(Address(rmon, BasicObjectLock::obj_offset()), rax);
3931 __ lock_object(rmon);
3932
3933 // check to make sure this monitor doesn't cause stack overflow after locking
3934 __ save_bcp(); // in case of exception
3935 __ generate_stack_overflow_check(0);
3936
3937 // The bcp has already been incremented. Just need to dispatch to
3938 // next instruction.
3939 __ dispatch_next(vtos);
3940 }
3941
3942 void TemplateTable::monitorexit() {
3943 transition(atos, vtos);
3944
3945 // check for null object
3946 __ null_check(rax);
3947
3948 const Address monitor_block_top(
3949 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3950 const Address monitor_block_bot(
3951 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3952 const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
3953
3954 Register rtop = c_rarg1;
3955 Register rbot = c_rarg2;
3956
3957 Label found;
3958
3959 // find matching slot
3960 {
3961 Label entry, loop;
3962 __ movptr(rtop, monitor_block_top); // derelativize pointer
3963 __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
3964 // rtop points to current entry, starting with top-most entry
3965
3966 __ lea(rbot, monitor_block_bot); // points to word before bottom
3967 // of monitor block
3968 __ jmpb(entry);
3969
3970 __ bind(loop);
3971 // check if current entry is for same object
3972 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
3973 // if same object then stop searching
3974 __ jcc(Assembler::equal, found);
3975 // otherwise advance to next entry
3976 __ addptr(rtop, entry_size);
3977 __ bind(entry);
3978 // check if bottom reached
3979 __ cmpptr(rtop, rbot);
3980 // if not at bottom then check this entry
3981 __ jcc(Assembler::notEqual, loop);
3982 }
3983
3984 // Error handling: unlocking was not block-structured
3985 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3986 InterpreterRuntime::throw_illegal_monitor_state_exception));
3987 __ should_not_reach_here();
3988
3989 // call run-time routine
3990 __ bind(found);
3991 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3992 __ unlock_object(rtop);
3993 __ pop_ptr(rax); // discard object
3994 }
3995
3996 // Wide instructions
3997 void TemplateTable::wide() {
3998 transition(vtos, vtos);
3999 __ load_unsigned_byte(rbx, at_bcp(1));
4000 ExternalAddress wtable((address)Interpreter::_wentry_point);
4001 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)), rscratch1);
4002 // Note: the rbcp increment step is part of the individual wide bytecode implementations
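// For example, `wide iload` dispatches through
// Interpreter::_wentry_point[Bytecodes::_iload], whose wide implementation
// reads a 2-byte local index and performs the bcp increment itself.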
4003 }
4004
4005 // Multi arrays
4006 void TemplateTable::multianewarray() {
4007 transition(vtos, atos);
4008
4009 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4010 // last dim is on top of stack; we want address of first one:
4011 // first_addr = rsp + ndims * stackElementSize - 1*wordSize
4012 // the trailing wordSize adjustment points at the first element itself
4013 __ lea(c_rarg1, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
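// (Worked example, assuming stackElementSize == wordSize == 8: for ndims == 3
// the lea yields rsp + 3*8 - 8 = rsp + 16, the slot holding the first
// dimension, two stack elements above the last one.)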
4014 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), c_rarg1);
4015 __ load_unsigned_byte(rbx, at_bcp(3));
4016 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
4017 }