4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 /* internal defines */
32 typedef struct DisasContext {
35 /* Nonzero if this instruction has been conditionally skipped. */
37 /* The label that will be jumped to when the instruction is skipped. */
39 struct TranslationBlock *tb;
40 int singlestep_enabled;
44 #define DISAS_JUMP_NEXT 4
46 /* XXX: move that elsewhere */
47 static uint16_t *gen_opc_ptr;
48 static uint32_t *gen_opparam_ptr;
53 #define DEF(s, n, copy_size) INDEX_op_ ## s,
61 static GenOpFunc1 *gen_test_cc[14] = {
78 const uint8_t table_logic_cc[16] = {
97 static GenOpFunc1 *gen_shift_T1_im[4] = {
104 static GenOpFunc *gen_shift_T1_0[4] = {
111 static GenOpFunc1 *gen_shift_T2_im[4] = {
118 static GenOpFunc *gen_shift_T2_0[4] = {
125 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
126 gen_op_shll_T1_im_cc,
127 gen_op_shrl_T1_im_cc,
128 gen_op_sarl_T1_im_cc,
129 gen_op_rorl_T1_im_cc,
132 static GenOpFunc *gen_shift_T1_0_cc[4] = {
139 static GenOpFunc *gen_shift_T1_T0[4] = {
146 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
147 gen_op_shll_T1_T0_cc,
148 gen_op_shrl_T1_T0_cc,
149 gen_op_sarl_T1_T0_cc,
150 gen_op_rorl_T1_T0_cc,
153 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
210 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
249 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
255 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
256 gen_op_shll_T0_im_thumb,
257 gen_op_shrl_T0_im_thumb,
258 gen_op_sarl_T0_im_thumb,
/* Branch-exchange: callers load the destination (with the Thumb bit)
   into T0 first; force a full CPU-state resync before continuing. */
261 static inline void gen_bx(DisasContext *s)
263 s->is_jmp = DISAS_UPDATE;
/* Load ARM register 'reg' into temporary T<t>.  A read of r15 must
   yield the pipeline-advanced PC: current instruction + 2 (Thumb) or
   + 4 (ARM) — the mode test between the two cases is elided here. */
267 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
272 /* normally, since we updated PC, we need only to add one insn */
274 val = (long)s->pc + 2;
276 val = (long)s->pc + 4;
277 gen_op_movl_TN_im[t](val);
279 gen_op_movl_TN_reg[t][reg]();
/* Copy ARM register 'reg' into T0. */
283 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
285 gen_movl_TN_reg(s, reg, 0);
/* Copy ARM register 'reg' into T1. */
288 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
290 gen_movl_TN_reg(s, reg, 1);
/* Copy ARM register 'reg' into T2. */
293 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
295 gen_movl_TN_reg(s, reg, 2);
/* Write temporary T<t> back to ARM register 'reg'.  A store to r15
   changes control flow, so translation of this block must stop. */
298 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
300 gen_op_movl_reg_TN[t][reg]();
302 s->is_jmp = DISAS_JUMP;
/* Write T0 back to ARM register 'reg'. */
306 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
308 gen_movl_reg_TN(s, reg, 0);
/* Write T1 back to ARM register 'reg'. */
311 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
313 gen_movl_reg_TN(s, reg, 1);
/* Apply the addressing-mode offset of a word/byte load/store to the
   address held in T1.  Bit 25 selects immediate vs. shifted-register
   offset; bit 23 is the U (add/subtract) bit — parts of the sign
   handling are elided in this view. */
316 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
318 int val, rm, shift, shiftop;
/* immediate offset form */
320 if (!(insn & (1 << 25))) {
323 if (!(insn & (1 << 23)))
326 gen_op_addl_T1_im(val);
/* register offset form: T2 = Rm shifted by a 5-bit immediate */
330 shift = (insn >> 7) & 0x1f;
331 gen_movl_T2_reg(s, rm);
332 shiftop = (insn >> 5) & 3;
334 gen_shift_T2_im[shiftop](shift);
335 } else if (shiftop != 0) {
336 gen_shift_T2_0[shiftop]();
338 if (!(insn & (1 << 23)))
/* Apply the addressing-mode offset of a halfword/signed load/store to
   T1.  Bit 22 selects the split 8-bit immediate (insn[3:0] combined
   with insn[11:8]) vs. a register offset; bit 23 is the U bit. */
345 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
349 if (insn & (1 << 22)) {
351 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
352 if (!(insn & (1 << 23)))
355 gen_op_addl_T1_im(val);
359 gen_movl_T2_reg(s, rm);
360 if (!(insn & (1 << 23)))
/* Expand to a helper gen_vfp_<name>(dp) that emits the double-
   precision ('d' suffix) or single-precision ('s' suffix) variant of
   the VFP op depending on 'dp'. */
367 #define VFP_OP(name) \
368 static inline void gen_vfp_##name(int dp) \
371 gen_op_vfp_##name##d(); \
373 gen_op_vfp_##name##s(); \
/* Byte offset of VFP register 'reg' inside CPUARMState.  Double
   registers index vfp.regs directly; single registers live in the
   halves of the doubles (hence reg >> 1), one branch returning the
   upper and the other the lower 32-bit word of the CPU_DoubleU. */
398 vfp_reg_offset (int dp, int reg)
401 return offsetof(CPUARMState, vfp.regs[reg]);
403 return offsetof(CPUARMState, vfp.regs[reg >> 1])
404 + offsetof(CPU_DoubleU, l.upper);
406 return offsetof(CPUARMState, vfp.regs[reg >> 1])
407 + offsetof(CPU_DoubleU, l.lower);
/* Load VFP register 'reg' (double-precision if dp) into F0. */
410 static inline void gen_mov_F0_vreg(int dp, int reg)
413 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
415 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
/* Load VFP register 'reg' (double-precision if dp) into F1. */
418 static inline void gen_mov_F1_vreg(int dp, int reg)
421 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
423 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
/* Store F0 into VFP register 'reg' (double-precision if dp). */
426 static inline void gen_mov_vreg_F0(int dp, int reg)
429 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
431 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
434 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
435 (i.e. an undefined instruction). */
436 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
438 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
441 dp = ((insn & 0xf00) == 0xb00);
442 switch ((insn >> 24) & 0xf) {
444 if (insn & (1 << 4)) {
445 /* single register transfer */
446 if ((insn & 0x6f) != 0x00)
448 rd = (insn >> 12) & 0xf;
452 rn = (insn >> 16) & 0xf;
453 /* Get the existing value even for arm->vfp moves because
454 we only set half the register. */
455 gen_mov_F0_vreg(1, rn);
457 if (insn & (1 << 20)) {
459 if (insn & (1 << 21))
460 gen_movl_reg_T1(s, rd);
462 gen_movl_reg_T0(s, rd);
465 if (insn & (1 << 21))
466 gen_movl_T1_reg(s, rd);
468 gen_movl_T0_reg(s, rd);
470 gen_mov_vreg_F0(dp, rn);
473 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
474 if (insn & (1 << 20)) {
476 if (insn & (1 << 21)) {
477 /* system register */
484 gen_op_vfp_movl_T0_fpscr_flags();
486 gen_op_vfp_movl_T0_fpscr();
492 gen_mov_F0_vreg(0, rn);
496 /* This will only set the 4 flag bits */
497 gen_op_movl_psr_T0();
499 gen_movl_reg_T0(s, rd);
502 gen_movl_T0_reg(s, rd);
503 if (insn & (1 << 21)) {
504 /* system register */
507 /* Writes are ignored. */
510 gen_op_vfp_movl_fpscr_T0();
511 /* This could change vector settings, so jump to
512 the next instruction. */
513 gen_op_movl_T0_im(s->pc);
514 gen_movl_reg_T0(s, 15);
515 s->is_jmp = DISAS_UPDATE;
522 gen_mov_vreg_F0(0, rn);
527 /* data processing */
528 /* The opcode is in bits 23, 21, 20 and 6. */
529 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
533 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
535 /* rn is register number */
538 rn = (insn >> 16) & 0xf;
541 if (op == 15 && (rn == 15 || rn > 17)) {
542 /* Integer or single precision destination. */
543 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
545 if (insn & (1 << 22))
547 rd = (insn >> 12) & 0xf;
550 if (op == 15 && (rn == 16 || rn == 17)) {
551 /* Integer source. */
552 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
559 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
560 if (op == 15 && rn == 15) {
561 /* Double precision destination. */
562 if (insn & (1 << 22))
564 rd = (insn >> 12) & 0xf;
566 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
567 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
570 veclen = env->vfp.vec_len;
571 if (op == 15 && rn > 3)
574 /* Shut up compiler warnings. */
585 /* Figure out what type of vector operation this is. */
586 if ((rd & bank_mask) == 0) {
591 delta_d = (env->vfp.vec_stride >> 1) + 1;
593 delta_d = env->vfp.vec_stride + 1;
595 if ((rm & bank_mask) == 0) {
596 /* mixed scalar/vector */
605 /* Load the initial operands. */
611 gen_mov_F0_vreg(0, rm);
616 gen_mov_F0_vreg(dp, rd);
617 gen_mov_F1_vreg(dp, rm);
621 /* Compare with zero */
622 gen_mov_F0_vreg(dp, rd);
626 /* One source operand. */
627 gen_mov_F0_vreg(dp, rm);
630 /* Two source operands. */
631 gen_mov_F0_vreg(dp, rn);
632 gen_mov_F1_vreg(dp, rm);
636 /* Perform the calculation. */
638 case 0: /* mac: fd + (fn * fm) */
640 gen_mov_F1_vreg(dp, rd);
643 case 1: /* nmac: fd - (fn * fm) */
646 gen_mov_F1_vreg(dp, rd);
649 case 2: /* msc: -fd + (fn * fm) */
651 gen_mov_F1_vreg(dp, rd);
654 case 3: /* nmsc: -fd - (fn * fm) */
656 gen_mov_F1_vreg(dp, rd);
660 case 4: /* mul: fn * fm */
663 case 5: /* nmul: -(fn * fm) */
667 case 6: /* add: fn + fm */
670 case 7: /* sub: fn - fm */
673 case 8: /* div: fn / fm */
676 case 15: /* extension space */
703 case 15: /* single<->double conversion */
718 case 25: /* ftouiz */
724 case 27: /* ftosiz */
727 default: /* undefined */
728 printf ("rn:%d\n", rn);
732 default: /* undefined */
733 printf ("op:%d\n", op);
737 /* Write back the result. */
738 if (op == 15 && (rn >= 8 && rn <= 11))
739 ; /* Comparison, do nothing. */
740 else if (op == 15 && rn > 17)
741 /* Integer result. */
742 gen_mov_vreg_F0(0, rd);
743 else if (op == 15 && rn == 15)
745 gen_mov_vreg_F0(!dp, rd);
747 gen_mov_vreg_F0(dp, rd);
749 /* break out of the loop if we have finished */
753 if (op == 15 && delta_m == 0) {
754 /* single source one-many */
756 rd = ((rd + delta_d) & (bank_mask - 1))
758 gen_mov_vreg_F0(dp, rd);
762 /* Setup the next operands. */
764 rd = ((rd + delta_d) & (bank_mask - 1))
768 /* One source operand. */
769 rm = ((rm + delta_m) & (bank_mask - 1))
771 gen_mov_F0_vreg(dp, rm);
773 /* Two source operands. */
774 rn = ((rn + delta_d) & (bank_mask - 1))
776 gen_mov_F0_vreg(dp, rn);
778 rm = ((rm + delta_m) & (bank_mask - 1))
780 gen_mov_F1_vreg(dp, rm);
788 if (dp && (insn & (1 << 22))) {
789 /* two-register transfer */
790 rn = (insn >> 16) & 0xf;
791 rd = (insn >> 12) & 0xf;
797 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
799 if (insn & (1 << 20)) {
802 gen_mov_F0_vreg(1, rm);
804 gen_movl_reg_T0(s, rd);
805 gen_movl_reg_T1(s, rn);
807 gen_mov_F0_vreg(0, rm);
809 gen_movl_reg_T0(s, rn);
810 gen_mov_F0_vreg(0, rm + 1);
812 gen_movl_reg_T0(s, rd);
817 gen_movl_T0_reg(s, rd);
818 gen_movl_T1_reg(s, rn);
820 gen_mov_vreg_F0(1, rm);
822 gen_movl_T0_reg(s, rn);
824 gen_mov_vreg_F0(0, rm);
825 gen_movl_T0_reg(s, rd);
827 gen_mov_vreg_F0(0, rm + 1);
832 rn = (insn >> 16) & 0xf;
834 rd = (insn >> 12) & 0xf;
836 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
837 gen_movl_T1_reg(s, rn);
838 if ((insn & 0x01200000) == 0x01000000) {
839 /* Single load/store */
840 offset = (insn & 0xff) << 2;
841 if ((insn & (1 << 23)) == 0)
843 gen_op_addl_T1_im(offset);
844 if (insn & (1 << 20)) {
846 gen_mov_vreg_F0(dp, rd);
848 gen_mov_F0_vreg(dp, rd);
852 /* load/store multiple */
854 n = (insn >> 1) & 0x7f;
858 if (insn & (1 << 24)) /* pre-decrement */
859 gen_op_addl_T1_im(-((insn & 0xff) << 2));
865 for (i = 0; i < n; i++) {
866 if (insn & (1 << 20)) {
869 gen_mov_vreg_F0(dp, rd + i);
872 gen_mov_F0_vreg(dp, rd + i);
875 gen_op_addl_T1_im(offset);
877 if (insn & (1 << 21)) {
879 if (insn & (1 << 24))
880 offset = -offset * n;
881 else if (dp && (insn & 1))
887 gen_op_addl_T1_im(offset);
888 gen_movl_reg_T1(s, rn);
894 /* Should never happen. */
/* Emit a jump to guest address 'dest'.  Under single-stepping use an
   indirect jump so the debug exception is still raised; otherwise
   emit a direct, TB-chained jump and mark the block DISAS_TB_JUMP. */
900 static inline void gen_jmp (DisasContext *s, uint32_t dest)
902 if (__builtin_expect(s->singlestep_enabled, 0)) {
903 /* An indirect jump so that we still trigger the debug exception. */
906 gen_op_movl_T0_im(dest);
909 gen_op_jmp0((long)s->tb, dest);
910 s->is_jmp = DISAS_TB_JUMP;
914 static void disas_arm_insn(CPUState * env, DisasContext *s)
916 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
923 /* Unconditional instructions. */
924 if ((insn & 0x0d70f000) == 0x0550f000)
926 else if ((insn & 0x0e000000) == 0x0a000000) {
927 /* branch link and change to thumb (blx <offset>) */
930 val = (uint32_t)s->pc;
931 gen_op_movl_T0_im(val);
932 gen_movl_reg_T0(s, 14);
933 /* Sign-extend the 24-bit offset */
934 offset = (((int32_t)insn) << 8) >> 8;
935 /* offset * 4 + bit24 * 2 + (thumb bit) */
936 val += (offset << 2) | ((insn >> 23) & 2) | 1;
937 /* pipeline offset */
939 gen_op_movl_T0_im(val);
942 } else if ((insn & 0x0fe00000) == 0x0c400000) {
943 /* Coprocessor double register transfer. */
944 } else if ((insn & 0x0f000010) == 0x0e000010) {
945 /* Additional coprocessor register transfer. */
950 /* if not always execute, we generate a conditional jump to
952 s->condlabel = gen_new_label();
953 gen_test_cc[cond ^ 1](s->condlabel);
955 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
956 //s->is_jmp = DISAS_JUMP_NEXT;
958 if ((insn & 0x0f900000) == 0x03000000) {
959 if ((insn & 0x0ff0f000) != 0x0360f000)
961 /* CPSR = immediate */
963 shift = ((insn >> 8) & 0xf) * 2;
965 val = (val >> shift) | (val << (32 - shift));
966 gen_op_movl_T0_im(val);
967 if (insn & (1 << 19))
968 gen_op_movl_psr_T0();
969 } else if ((insn & 0x0f900000) == 0x01000000
970 && (insn & 0x00000090) != 0x00000090) {
971 /* miscellaneous instructions */
972 op1 = (insn >> 21) & 3;
973 sh = (insn >> 4) & 0xf;
976 case 0x0: /* move program status register */
978 /* SPSR not accessible in user mode */
983 gen_movl_T0_reg(s, rm);
984 if (insn & (1 << 19))
985 gen_op_movl_psr_T0();
988 rd = (insn >> 12) & 0xf;
989 gen_op_movl_T0_psr();
990 gen_movl_reg_T0(s, rd);
995 /* branch/exchange thumb (bx). */
996 gen_movl_T0_reg(s, rm);
998 } else if (op1 == 3) {
1000 rd = (insn >> 12) & 0xf;
1001 gen_movl_T0_reg(s, rm);
1003 gen_movl_reg_T0(s, rd);
1012 /* branch link/exchange thumb (blx) */
1013 val = (uint32_t)s->pc;
1014 gen_op_movl_T0_im(val);
1015 gen_movl_reg_T0(s, 14);
1016 gen_movl_T0_reg(s, rm);
1019 case 0x5: /* saturating add/subtract */
1020 rd = (insn >> 12) & 0xf;
1021 rn = (insn >> 16) & 0xf;
1022 gen_movl_T0_reg(s, rm);
1023 gen_movl_T1_reg(s, rn);
1025 gen_op_double_T1_saturate();
1027 gen_op_subl_T0_T1_saturate();
1029 gen_op_addl_T0_T1_saturate();
1030 gen_movl_reg_T0(s, rd);
1032 case 0x8: /* signed multiply */
1036 rs = (insn >> 8) & 0xf;
1037 rn = (insn >> 12) & 0xf;
1038 rd = (insn >> 16) & 0xf;
1040 /* (32 * 16) >> 16 */
1041 gen_movl_T0_reg(s, rm);
1042 gen_movl_T1_reg(s, rs);
1044 gen_op_sarl_T1_im(16);
1047 gen_op_imulw_T0_T1();
1048 if ((sh & 2) == 0) {
1049 gen_movl_T1_reg(s, rn);
1050 gen_op_addl_T0_T1_setq();
1052 gen_movl_reg_T0(s, rd);
1055 gen_movl_T0_reg(s, rm);
1057 gen_op_sarl_T0_im(16);
1060 gen_movl_T1_reg(s, rs);
1062 gen_op_sarl_T1_im(16);
1066 gen_op_imull_T0_T1();
1067 gen_op_addq_T0_T1(rn, rd);
1068 gen_movl_reg_T0(s, rn);
1069 gen_movl_reg_T1(s, rd);
1073 gen_movl_T1_reg(s, rn);
1074 gen_op_addl_T0_T1_setq();
1076 gen_movl_reg_T0(s, rd);
1083 } else if (((insn & 0x0e000000) == 0 &&
1084 (insn & 0x00000090) != 0x90) ||
1085 ((insn & 0x0e000000) == (1 << 25))) {
1086 int set_cc, logic_cc, shiftop;
1088 op1 = (insn >> 21) & 0xf;
1089 set_cc = (insn >> 20) & 1;
1090 logic_cc = table_logic_cc[op1] & set_cc;
1092 /* data processing instruction */
1093 if (insn & (1 << 25)) {
1094 /* immediate operand */
1096 shift = ((insn >> 8) & 0xf) * 2;
1098 val = (val >> shift) | (val << (32 - shift));
1099 gen_op_movl_T1_im(val);
1100 if (logic_cc && shift)
1105 gen_movl_T1_reg(s, rm);
1106 shiftop = (insn >> 5) & 3;
1107 if (!(insn & (1 << 4))) {
1108 shift = (insn >> 7) & 0x1f;
1111 gen_shift_T1_im_cc[shiftop](shift);
1113 gen_shift_T1_im[shiftop](shift);
1115 } else if (shiftop != 0) {
1117 gen_shift_T1_0_cc[shiftop]();
1119 gen_shift_T1_0[shiftop]();
1123 rs = (insn >> 8) & 0xf;
1124 gen_movl_T0_reg(s, rs);
1126 gen_shift_T1_T0_cc[shiftop]();
1128 gen_shift_T1_T0[shiftop]();
1132 if (op1 != 0x0f && op1 != 0x0d) {
1133 rn = (insn >> 16) & 0xf;
1134 gen_movl_T0_reg(s, rn);
1136 rd = (insn >> 12) & 0xf;
1139 gen_op_andl_T0_T1();
1140 gen_movl_reg_T0(s, rd);
1142 gen_op_logic_T0_cc();
1145 gen_op_xorl_T0_T1();
1146 gen_movl_reg_T0(s, rd);
1148 gen_op_logic_T0_cc();
1152 gen_op_subl_T0_T1_cc();
1154 gen_op_subl_T0_T1();
1155 gen_movl_reg_T0(s, rd);
1159 gen_op_rsbl_T0_T1_cc();
1161 gen_op_rsbl_T0_T1();
1162 gen_movl_reg_T0(s, rd);
1166 gen_op_addl_T0_T1_cc();
1168 gen_op_addl_T0_T1();
1169 gen_movl_reg_T0(s, rd);
1173 gen_op_adcl_T0_T1_cc();
1175 gen_op_adcl_T0_T1();
1176 gen_movl_reg_T0(s, rd);
1180 gen_op_sbcl_T0_T1_cc();
1182 gen_op_sbcl_T0_T1();
1183 gen_movl_reg_T0(s, rd);
1187 gen_op_rscl_T0_T1_cc();
1189 gen_op_rscl_T0_T1();
1190 gen_movl_reg_T0(s, rd);
1194 gen_op_andl_T0_T1();
1195 gen_op_logic_T0_cc();
1200 gen_op_xorl_T0_T1();
1201 gen_op_logic_T0_cc();
1206 gen_op_subl_T0_T1_cc();
1211 gen_op_addl_T0_T1_cc();
1216 gen_movl_reg_T0(s, rd);
1218 gen_op_logic_T0_cc();
1221 gen_movl_reg_T1(s, rd);
1223 gen_op_logic_T1_cc();
1226 gen_op_bicl_T0_T1();
1227 gen_movl_reg_T0(s, rd);
1229 gen_op_logic_T0_cc();
1234 gen_movl_reg_T1(s, rd);
1236 gen_op_logic_T1_cc();
1240 /* other instructions */
1241 op1 = (insn >> 24) & 0xf;
1245 /* multiplies, extra load/stores */
1246 sh = (insn >> 5) & 3;
1249 rd = (insn >> 16) & 0xf;
1250 rn = (insn >> 12) & 0xf;
1251 rs = (insn >> 8) & 0xf;
1253 if (((insn >> 22) & 3) == 0) {
1255 gen_movl_T0_reg(s, rs);
1256 gen_movl_T1_reg(s, rm);
1258 if (insn & (1 << 21)) {
1259 gen_movl_T1_reg(s, rn);
1260 gen_op_addl_T0_T1();
1262 if (insn & (1 << 20))
1263 gen_op_logic_T0_cc();
1264 gen_movl_reg_T0(s, rd);
1267 gen_movl_T0_reg(s, rs);
1268 gen_movl_T1_reg(s, rm);
1269 if (insn & (1 << 22))
1270 gen_op_imull_T0_T1();
1272 gen_op_mull_T0_T1();
1273 if (insn & (1 << 21)) /* mult accumulate */
1274 gen_op_addq_T0_T1(rn, rd);
1275 if (!(insn & (1 << 23))) { /* double accumulate */
1276 gen_op_addq_lo_T0_T1(rn);
1277 gen_op_addq_lo_T0_T1(rd);
1279 if (insn & (1 << 20))
1281 gen_movl_reg_T0(s, rn);
1282 gen_movl_reg_T1(s, rd);
1285 rn = (insn >> 16) & 0xf;
1286 rd = (insn >> 12) & 0xf;
1287 if (insn & (1 << 23)) {
1288 /* load/store exclusive */
1291 /* SWP instruction */
1294 gen_movl_T0_reg(s, rm);
1295 gen_movl_T1_reg(s, rn);
1296 if (insn & (1 << 22)) {
1297 gen_op_swpb_T0_T1();
1299 gen_op_swpl_T0_T1();
1301 gen_movl_reg_T0(s, rd);
1305 /* Misc load/store */
1306 rn = (insn >> 16) & 0xf;
1307 rd = (insn >> 12) & 0xf;
1308 gen_movl_T1_reg(s, rn);
1309 if (insn & (1 << 24))
1310 gen_add_datah_offset(s, insn);
1311 if (insn & (1 << 20)) {
1315 gen_op_lduw_T0_T1();
1318 gen_op_ldsb_T0_T1();
1322 gen_op_ldsw_T0_T1();
1325 gen_movl_reg_T0(s, rd);
1326 } else if (sh & 2) {
1330 gen_movl_T0_reg(s, rd);
1332 gen_op_addl_T1_im(4);
1333 gen_movl_T0_reg(s, rd + 1);
1335 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1336 gen_op_addl_T1_im(-4);
1340 gen_movl_reg_T0(s, rd);
1341 gen_op_addl_T1_im(4);
1343 gen_movl_reg_T0(s, rd + 1);
1344 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1345 gen_op_addl_T1_im(-4);
1349 gen_movl_T0_reg(s, rd);
1352 if (!(insn & (1 << 24))) {
1353 gen_add_datah_offset(s, insn);
1354 gen_movl_reg_T1(s, rn);
1355 } else if (insn & (1 << 21)) {
1356 gen_movl_reg_T1(s, rn);
1364 /* load/store byte/word */
1365 rn = (insn >> 16) & 0xf;
1366 rd = (insn >> 12) & 0xf;
1367 gen_movl_T1_reg(s, rn);
1368 if (insn & (1 << 24))
1369 gen_add_data_offset(s, insn);
1370 if (insn & (1 << 20)) {
1372 if (insn & (1 << 22))
1373 gen_op_ldub_T0_T1();
1379 gen_movl_reg_T0(s, rd);
1382 gen_movl_T0_reg(s, rd);
1383 if (insn & (1 << 22))
1388 if (!(insn & (1 << 24))) {
1389 gen_add_data_offset(s, insn);
1390 gen_movl_reg_T1(s, rn);
1391 } else if (insn & (1 << 21))
1392 gen_movl_reg_T1(s, rn); {
1399 /* load/store multiple words */
1400 /* XXX: store correct base if write back */
1401 if (insn & (1 << 22))
1402 goto illegal_op; /* only usable in supervisor mode */
1403 rn = (insn >> 16) & 0xf;
1404 gen_movl_T1_reg(s, rn);
1406 /* compute total size */
1409 if (insn & (1 << i))
1412 /* XXX: test invalid n == 0 case ? */
1413 if (insn & (1 << 23)) {
1414 if (insn & (1 << 24)) {
1416 gen_op_addl_T1_im(4);
1418 /* post increment */
1421 if (insn & (1 << 24)) {
1423 gen_op_addl_T1_im(-(n * 4));
1425 /* post decrement */
1427 gen_op_addl_T1_im(-((n - 1) * 4));
1432 if (insn & (1 << i)) {
1433 if (insn & (1 << 20)) {
1439 gen_movl_reg_T0(s, i);
1443 /* special case: r15 = PC + 12 */
1444 val = (long)s->pc + 8;
1445 gen_op_movl_TN_im[0](val);
1447 gen_movl_T0_reg(s, i);
1452 /* no need to add after the last transfer */
1454 gen_op_addl_T1_im(4);
1457 if (insn & (1 << 21)) {
1459 if (insn & (1 << 23)) {
1460 if (insn & (1 << 24)) {
1463 /* post increment */
1464 gen_op_addl_T1_im(4);
1467 if (insn & (1 << 24)) {
1470 gen_op_addl_T1_im(-((n - 1) * 4));
1472 /* post decrement */
1473 gen_op_addl_T1_im(-(n * 4));
1476 gen_movl_reg_T1(s, rn);
1485 /* branch (and link) */
1486 val = (int32_t)s->pc;
1487 if (insn & (1 << 24)) {
1488 gen_op_movl_T0_im(val);
1489 gen_op_movl_reg_TN[0][14]();
1491 offset = (((int32_t)insn << 8) >> 8);
1492 val += (offset << 2) + 4;
1500 op1 = (insn >> 8) & 0xf;
1504 if (disas_vfp_insn (env, s, insn))
1508 /* unknown coprocessor. */
1514 gen_op_movl_T0_im((long)s->pc);
1515 gen_op_movl_reg_TN[0][15]();
1517 s->is_jmp = DISAS_JUMP;
1521 gen_op_movl_T0_im((long)s->pc - 4);
1522 gen_op_movl_reg_TN[0][15]();
1523 gen_op_undef_insn();
1524 s->is_jmp = DISAS_JUMP;
1530 static void disas_thumb_insn(DisasContext *s)
1532 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1539 switch (insn >> 12) {
1542 op = (insn >> 11) & 3;
1545 rn = (insn >> 3) & 7;
1546 gen_movl_T0_reg(s, rn);
1547 if (insn & (1 << 10)) {
1549 gen_op_movl_T1_im((insn >> 6) & 7);
1552 rm = (insn >> 6) & 7;
1553 gen_movl_T1_reg(s, rm);
1555 if (insn & (1 << 9))
1556 gen_op_subl_T0_T1_cc();
1558 gen_op_addl_T0_T1_cc();
1559 gen_movl_reg_T0(s, rd);
1561 /* shift immediate */
1562 rm = (insn >> 3) & 7;
1563 shift = (insn >> 6) & 0x1f;
1564 gen_movl_T0_reg(s, rm);
1565 gen_shift_T0_im_thumb[op](shift);
1566 gen_movl_reg_T0(s, rd);
1570 /* arithmetic large immediate */
1571 op = (insn >> 11) & 3;
1572 rd = (insn >> 8) & 0x7;
1574 gen_op_movl_T0_im(insn & 0xff);
1576 gen_movl_T0_reg(s, rd);
1577 gen_op_movl_T1_im(insn & 0xff);
1581 gen_op_logic_T0_cc();
1584 gen_op_subl_T0_T1_cc();
1587 gen_op_addl_T0_T1_cc();
1590 gen_op_subl_T0_T1_cc();
1594 gen_movl_reg_T0(s, rd);
1597 if (insn & (1 << 11)) {
1598 rd = (insn >> 8) & 7;
1599 /* load pc-relative. Bit 1 of PC is ignored. */
1600 val = s->pc + 2 + ((insn & 0xff) * 4);
1601 val &= ~(uint32_t)2;
1602 gen_op_movl_T1_im(val);
1604 gen_movl_reg_T0(s, rd);
1607 if (insn & (1 << 10)) {
1608 /* data processing extended or blx */
1609 rd = (insn & 7) | ((insn >> 4) & 8);
1610 rm = (insn >> 3) & 0xf;
1611 op = (insn >> 8) & 3;
1614 gen_movl_T0_reg(s, rd);
1615 gen_movl_T1_reg(s, rm);
1616 gen_op_addl_T0_T1();
1617 gen_movl_reg_T0(s, rd);
1620 gen_movl_T0_reg(s, rd);
1621 gen_movl_T1_reg(s, rm);
1622 gen_op_subl_T0_T1_cc();
1624 case 2: /* mov/cpy */
1625 gen_movl_T0_reg(s, rm);
1626 gen_movl_reg_T0(s, rd);
1628 case 3:/* branch [and link] exchange thumb register */
1629 if (insn & (1 << 7)) {
1630 val = (uint32_t)s->pc | 1;
1631 gen_op_movl_T1_im(val);
1632 gen_movl_reg_T1(s, 14);
1634 gen_movl_T0_reg(s, rm);
1641 /* data processing register */
1643 rm = (insn >> 3) & 7;
1644 op = (insn >> 6) & 0xf;
1645 if (op == 2 || op == 3 || op == 4 || op == 7) {
1646 /* the shift/rotate ops want the operands backwards */
1655 if (op == 9) /* neg */
1656 gen_op_movl_T0_im(0);
1657 else if (op != 0xf) /* mvn doesn't read its first operand */
1658 gen_movl_T0_reg(s, rd);
1660 gen_movl_T1_reg(s, rm);
1663 gen_op_andl_T0_T1();
1664 gen_op_logic_T0_cc();
1667 gen_op_xorl_T0_T1();
1668 gen_op_logic_T0_cc();
1671 gen_op_shll_T1_T0_cc();
1674 gen_op_shrl_T1_T0_cc();
1677 gen_op_sarl_T1_T0_cc();
1680 gen_op_adcl_T0_T1_cc();
1683 gen_op_sbcl_T0_T1_cc();
1686 gen_op_rorl_T1_T0_cc();
1689 gen_op_andl_T0_T1();
1690 gen_op_logic_T0_cc();
1694 gen_op_subl_T0_T1_cc();
1697 gen_op_subl_T0_T1_cc();
1701 gen_op_addl_T0_T1_cc();
1706 gen_op_logic_T0_cc();
1709 gen_op_mull_T0_T1();
1710 gen_op_logic_T0_cc();
1713 gen_op_bicl_T0_T1();
1714 gen_op_logic_T0_cc();
1718 gen_op_logic_T1_cc();
1725 gen_movl_reg_T1(s, rm);
1727 gen_movl_reg_T0(s, rd);
1732 /* load/store register offset. */
1734 rn = (insn >> 3) & 7;
1735 rm = (insn >> 6) & 7;
1736 op = (insn >> 9) & 7;
1737 gen_movl_T1_reg(s, rn);
1738 gen_movl_T2_reg(s, rm);
1739 gen_op_addl_T1_T2();
1741 if (op < 3) /* store */
1742 gen_movl_T0_reg(s, rd);
1755 gen_op_ldsb_T0_T1();
1761 gen_op_lduw_T0_T1();
1764 gen_op_ldub_T0_T1();
1767 gen_op_ldsw_T0_T1();
1770 if (op >= 3) /* load */
1771 gen_movl_reg_T0(s, rd);
1775 /* load/store word immediate offset */
1777 rn = (insn >> 3) & 7;
1778 gen_movl_T1_reg(s, rn);
1779 val = (insn >> 4) & 0x7c;
1780 gen_op_movl_T2_im(val);
1781 gen_op_addl_T1_T2();
1783 if (insn & (1 << 11)) {
1786 gen_movl_reg_T0(s, rd);
1789 gen_movl_T0_reg(s, rd);
1795 /* load/store byte immediate offset */
1797 rn = (insn >> 3) & 7;
1798 gen_movl_T1_reg(s, rn);
1799 val = (insn >> 6) & 0x1f;
1800 gen_op_movl_T2_im(val);
1801 gen_op_addl_T1_T2();
1803 if (insn & (1 << 11)) {
1805 gen_op_ldub_T0_T1();
1806 gen_movl_reg_T0(s, rd);
1809 gen_movl_T0_reg(s, rd);
1815 /* load/store halfword immediate offset */
1817 rn = (insn >> 3) & 7;
1818 gen_movl_T1_reg(s, rn);
1819 val = (insn >> 5) & 0x3e;
1820 gen_op_movl_T2_im(val);
1821 gen_op_addl_T1_T2();
1823 if (insn & (1 << 11)) {
1825 gen_op_lduw_T0_T1();
1826 gen_movl_reg_T0(s, rd);
1829 gen_movl_T0_reg(s, rd);
1835 /* load/store from stack */
1836 rd = (insn >> 8) & 7;
1837 gen_movl_T1_reg(s, 13);
1838 val = (insn & 0xff) * 4;
1839 gen_op_movl_T2_im(val);
1840 gen_op_addl_T1_T2();
1842 if (insn & (1 << 11)) {
1845 gen_movl_reg_T0(s, rd);
1848 gen_movl_T0_reg(s, rd);
1854 /* add to high reg */
1855 rd = (insn >> 8) & 7;
1856 if (insn & (1 << 11)) {
1858 gen_movl_T0_reg(s, 13);
1860 /* PC. bit 1 is ignored. */
1861 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
1863 val = (insn & 0xff) * 4;
1864 gen_op_movl_T1_im(val);
1865 gen_op_addl_T0_T1();
1866 gen_movl_reg_T0(s, rd);
1871 op = (insn >> 8) & 0xf;
1874 /* adjust stack pointer */
1875 gen_movl_T1_reg(s, 13);
1876 val = (insn & 0x7f) * 4;
1877 if (insn & (1 << 7))
1878 val = -(int32_t)val;
1879 gen_op_movl_T2_im(val);
1880 gen_op_addl_T1_T2();
1881 gen_movl_reg_T1(s, 13);
1884 case 4: case 5: case 0xc: case 0xd:
1886 gen_movl_T1_reg(s, 13);
1887 if (insn & (1 << 8))
1891 for (i = 0; i < 8; i++) {
1892 if (insn & (1 << i))
1895 if ((insn & (1 << 11)) == 0) {
1896 gen_op_movl_T2_im(-offset);
1897 gen_op_addl_T1_T2();
1899 gen_op_movl_T2_im(4);
1900 for (i = 0; i < 8; i++) {
1901 if (insn & (1 << i)) {
1902 if (insn & (1 << 11)) {
1905 gen_movl_reg_T0(s, i);
1908 gen_movl_T0_reg(s, i);
1911 /* advance to the next address. */
1912 gen_op_addl_T1_T2();
1915 if (insn & (1 << 8)) {
1916 if (insn & (1 << 11)) {
1919 /* don't set the pc until the rest of the instruction
1923 gen_movl_T0_reg(s, 14);
1926 gen_op_addl_T1_T2();
1928 if ((insn & (1 << 11)) == 0) {
1929 gen_op_movl_T2_im(-offset);
1930 gen_op_addl_T1_T2();
1932 /* write back the new stack pointer */
1933 gen_movl_reg_T1(s, 13);
1934 /* set the new PC value */
1935 if ((insn & 0x0900) == 0x0900)
1945 /* load/store multiple */
1946 rn = (insn >> 8) & 0x7;
1947 gen_movl_T1_reg(s, rn);
1948 gen_op_movl_T2_im(4);
1949 for (i = 0; i < 8; i++) {
1950 if (insn & (1 << i)) {
1951 if (insn & (1 << 11)) {
1954 gen_movl_reg_T0(s, i);
1957 gen_movl_T0_reg(s, i);
1960 /* advance to the next address */
1961 gen_op_addl_T1_T2();
1964 /* Base register writeback. */
1965 gen_movl_reg_T1(s, rn);
1969 /* conditional branch or swi */
1970 cond = (insn >> 8) & 0xf;
1976 gen_op_movl_T0_im((long)s->pc | 1);
1977 /* Don't set r15. */
1978 gen_op_movl_reg_TN[0][15]();
1980 s->is_jmp = DISAS_JUMP;
1983 /* generate a conditional jump to next instruction */
1984 s->condlabel = gen_new_label();
1985 gen_test_cc[cond ^ 1](s->condlabel);
1987 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1988 //s->is_jmp = DISAS_JUMP_NEXT;
1989 gen_movl_T1_reg(s, 15);
1991 /* jump to the offset */
1992 val = (uint32_t)s->pc + 2;
1993 offset = ((int32_t)insn << 24) >> 24;
1999 /* unconditional branch */
2000 if (insn & (1 << 11))
2001 goto undef; /* Second half of a blx */
2002 val = (uint32_t)s->pc;
2003 offset = ((int32_t)insn << 21) >> 21;
2004 val += (offset << 1) + 2;
2009 /* branch and link [and switch to arm] */
2010 offset = ((int32_t)insn << 21) >> 10;
2012 offset |= insn & 0x7ff;
2014 val = (uint32_t)s->pc + 2;
2015 gen_op_movl_T1_im(val | 1);
2016 gen_movl_reg_T1(s, 14);
2019 if (insn & (1 << 11)) {
2024 val &= ~(uint32_t)2;
2025 gen_op_movl_T0_im(val);
2031 gen_op_movl_T0_im((long)s->pc - 2);
2032 gen_op_movl_reg_TN[0][15]();
2033 gen_op_undef_insn();
2034 s->is_jmp = DISAS_JUMP;
2037 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2038 basic block 'tb'. If search_pc is TRUE, also generate PC
2039 information for each intermediate instruction. */
/* Translate a run of guest code for 'tb' into the intermediate-op
   buffers gen_opc_buf/gen_opparam_buf.  When search_pc is nonzero,
   per-op PC information is also recorded (gen_opc_pc /
   gen_opc_instr_start) so a host PC can later be mapped back to a
   guest PC. */
2040 static inline int gen_intermediate_code_internal(CPUState *env,
2041 TranslationBlock *tb,
2044 DisasContext dc1, *dc = &dc1;
2045 uint16_t *gen_opc_end;
2047 target_ulong pc_start;
2049 /* generate intermediate code */
2054 gen_opc_ptr = gen_opc_buf;
2055 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2056 gen_opparam_ptr = gen_opparam_buf;
2058 dc->is_jmp = DISAS_NEXT;
2060 dc->singlestep_enabled = env->singlestep_enabled;
2062 dc->thumb = env->thumb;
/* If a breakpoint falls on this instruction, sync r15 and stop. */
2066 if (env->nb_breakpoints > 0) {
2067 for(j = 0; j < env->nb_breakpoints; j++) {
2068 if (env->breakpoints[j] == dc->pc) {
2069 gen_op_movl_T0_im((long)dc->pc);
2070 gen_op_movl_reg_TN[0][15]();
2072 dc->is_jmp = DISAS_JUMP;
/* search_pc bookkeeping: mark which op begins a guest instruction. */
2078 j = gen_opc_ptr - gen_opc_buf;
2082 gen_opc_instr_start[lj++] = 0;
2084 gen_opc_pc[lj] = dc->pc;
2085 gen_opc_instr_start[lj] = 1;
2089 disas_thumb_insn(dc);
2091 disas_arm_insn(env, dc);
2093 if (dc->condjmp && !dc->is_jmp) {
2094 gen_set_label(dc->condlabel);
2097 /* Translation stops when a conditional branch is encountered.
2098 * Otherwise the subsequent code could get translated several times.
2100 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2101 !env->singlestep_enabled &&
2102 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
2103 /* At this stage dc->condjmp will only be set when the skipped
2104 * instruction was a conditional branch, and the PC has already been
2106 if (__builtin_expect(env->singlestep_enabled, 0)) {
2107 /* Make sure the pc is updated, and raise a debug exception. */
2110 gen_set_label(dc->condlabel);
2112 if (dc->condjmp || !dc->is_jmp) {
2113 gen_op_movl_T0_im((long)dc->pc);
2114 gen_op_movl_reg_TN[0][15]();
/* Epilogue: chain to the next TB, or fall back to the TB hash table. */
2119 switch(dc->is_jmp) {
2121 gen_op_jmp1((long)dc->tb, (long)dc->pc);
2126 /* indicate that the hash table must be used to find the next TB */
2131 /* nothing more to generate */
2135 gen_set_label(dc->condlabel);
2136 gen_op_jmp1((long)dc->tb, (long)dc->pc);
2140 *gen_opc_ptr = INDEX_op_end;
2143 if (loglevel & CPU_LOG_TB_IN_ASM) {
2144 fprintf(logfile, "----------------\n");
2145 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2146 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2147 fprintf(logfile, "\n");
2148 if (loglevel & (CPU_LOG_TB_OP)) {
2149 fprintf(logfile, "OP:\n");
2150 dump_ops(gen_opc_buf, gen_opparam_buf);
2151 fprintf(logfile, "\n");
2156 tb->size = dc->pc - pc_start;
/* Translate a TB without per-op PC tracking. */
2160 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2162 return gen_intermediate_code_internal(env, tb, 0);
/* Translate a TB with per-op PC tracking enabled (search_pc = 1). */
2165 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2167 return gen_intermediate_code_internal(env, tb, 1);
/* Allocate and zero-initialize a fresh ARM CPU state and install it
   as the current CPU.  NOTE(review): the malloc NULL-check is elided
   from this view — confirm it exists before the memset. */
2170 CPUARMState *cpu_arm_init(void)
2176 env = malloc(sizeof(CPUARMState));
2179 memset(env, 0, sizeof(CPUARMState));
2180 cpu_single_env = env;
/* Release the CPU state allocated by cpu_arm_init() (body elided). */
2184 void cpu_arm_close(CPUARMState *env)
/* Dump the guest CPU state — core registers, PSR condition flags, and
   the VFP register bank — to 'f' via the supplied cpu_fprintf. */
2189 void cpu_dump_state(CPUState *env, FILE *f,
2190 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2201 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2203 cpu_fprintf(f, "\n");
2205 cpu_fprintf(f, " ");
2207 cpu_fprintf(f, "PSR=%08x %c%c%c%c\n",
2209 env->cpsr & (1 << 31) ? 'N' : '-',
2210 env->cpsr & (1 << 30) ? 'Z' : '-',
2211 env->cpsr & (1 << 29) ? 'C' : '-',
2212 env->cpsr & (1 << 28) ? 'V' : '-');
2214 for (i = 0; i < 16; i++) {
2215 d.d = env->vfp.regs[i];
2218 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2219 i * 2, (int)s0.i, s0.s,
/* NOTE(review): the odd-numbered single register reuses s0 here;
   presumably this should be s1 (whose assignment is elided in this
   view) — confirm against the original file. */
2220 i * 2 + 1, (int)s0.i, s0.s,
2221 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2224 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
/* Debugger hook: presumably maps a guest virtual address to its
   physical page (body elided in this view — confirm). */
2227 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
2232 #if defined(CONFIG_USER_ONLY)
/* User-mode-only MMU fault handler: record the faulting address in
   cp15 register 6 (the fault address register) and raise a prefetch
   or data abort depending on the access type. */
2234 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
2235 int is_user, int is_softmmu)
2237 env->cp15_6 = address;
2239 env->exception_index = EXCP_PREFETCH_ABORT;
2241 env->exception_index = EXCP_DATA_ABORT;
2248 #error not implemented