674 }
675
676
677 void LIR_Assembler::emit_op2(LIR_Op2* op) {
678 switch (op->code()) {
679 case lir_cmp:
680 if (op->info() != NULL) {
681 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
682 "shouldn't be codeemitinfo for non-address operands");
683 add_debug_info_for_null_check_here(op->info()); // exception possible
684 }
685 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
686 break;
687
688 case lir_cmp_l2i:
689 case lir_cmp_fd2i:
690 case lir_ucmp_fd2i:
691 comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
692 break;
693
694 case lir_cmove:
695 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
696 break;
697
698 case lir_shl:
699 case lir_shr:
700 case lir_ushr:
701 if (op->in_opr2()->is_constant()) {
702 shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
703 } else {
704 shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
705 }
706 break;
707
708 case lir_add:
709 case lir_sub:
710 case lir_mul:
711 case lir_div:
712 case lir_rem:
713 assert(op->fpu_pop_count() < 2, "");
714 arith_op(
715 op->code(),
716 op->in_opr1(),
739 op->in_opr1(),
740 op->in_opr2(),
741 op->result_opr());
742 break;
743
744 case lir_throw:
745 throw_op(op->in_opr1(), op->in_opr2(), op->info());
746 break;
747
748 case lir_xadd:
749 case lir_xchg:
750 atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
751 break;
752
753 default:
754 Unimplemented();
755 break;
756 }
757 }
758
759
760 void LIR_Assembler::build_frame() {
761 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
762 }
763
764
// Force a floating-point value to its declared precision by storing the FPU
// register into a stack slot of matching width (strictfp rounding).
// src  - single or double FPU register holding the value
// tmp  - scratch operand (unused in this generic path)
// dest - stack slot of the same width as src
// pop_fpu_stack - whether to pop the x87 stack after the store
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  // Only platforms that require explicit rounding should reach here.
  assert(strict_fp_requires_explicit_rounding, "not required");
  // Widths must match: single fpu -> single stack, double fpu -> double stack.
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}
773
774
775 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
776 if (src->is_register()) {
777 if (dest->is_register()) {
778 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
|
674 }
675
676
677 void LIR_Assembler::emit_op2(LIR_Op2* op) {
678 switch (op->code()) {
679 case lir_cmp:
680 if (op->info() != NULL) {
681 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
682 "shouldn't be codeemitinfo for non-address operands");
683 add_debug_info_for_null_check_here(op->info()); // exception possible
684 }
685 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
686 break;
687
688 case lir_cmp_l2i:
689 case lir_cmp_fd2i:
690 case lir_ucmp_fd2i:
691 comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
692 break;
693
694 #ifndef RISCV
695 case lir_cmove:
696 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
697 break;
698 #endif
699
700 case lir_shl:
701 case lir_shr:
702 case lir_ushr:
703 if (op->in_opr2()->is_constant()) {
704 shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
705 } else {
706 shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
707 }
708 break;
709
710 case lir_add:
711 case lir_sub:
712 case lir_mul:
713 case lir_div:
714 case lir_rem:
715 assert(op->fpu_pop_count() < 2, "");
716 arith_op(
717 op->code(),
718 op->in_opr1(),
741 op->in_opr1(),
742 op->in_opr2(),
743 op->result_opr());
744 break;
745
746 case lir_throw:
747 throw_op(op->in_opr1(), op->in_opr2(), op->info());
748 break;
749
750 case lir_xadd:
751 case lir_xchg:
752 atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
753 break;
754
755 default:
756 Unimplemented();
757 break;
758 }
759 }
760
#ifdef RISCV
// Emit machine code for a four-input LIR operation. lir_cmove is the only
// op emitted this way: the RISC-V port keeps the compare inputs alongside
// the two value operands, so the conditional move needs four inputs.
void LIR_Assembler::emit_op4(LIR_Op4* op) {
  if (op->code() == lir_cmove) {
    cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(),
          op->in_opr3(), op->in_opr4());
  } else {
    // No other four-operand ops exist; anything else is a lowering bug.
    Unimplemented();
  }
}
#endif
774
775 void LIR_Assembler::build_frame() {
776 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
777 }
778
779
// Force a floating-point value to its declared precision by storing the FPU
// register into a stack slot of matching width (strictfp rounding).
// src  - single or double FPU register holding the value
// tmp  - scratch operand (unused in this generic path)
// dest - stack slot of the same width as src
// pop_fpu_stack - whether to pop the x87 stack after the store
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  // Only platforms that require explicit rounding should reach here.
  assert(strict_fp_requires_explicit_rounding, "not required");
  // Widths must match: single fpu -> single stack, double fpu -> double stack.
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}
788
789
790 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
791 if (src->is_register()) {
792 if (dest->is_register()) {
793 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
|