/* Record that this CPU model implements a feature: sets the
   ARM_FEATURE_* bit `feature` in env->features. */
8 static inline void set_feature(CPUARMState *env, int feature)
10 env->features |= 1u << feature;
/* Apply CPU-model-specific reset state for the given CPUID value:
   stores the cp15 ID register, sets per-model feature bits and the
   VFP FPSID.  Aborts emulation on an unknown id. */
13 static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
15 env->cp15.c0_cpuid = id;
17 case ARM_CPUID_ARM926:
18 set_feature(env, ARM_FEATURE_VFP);
/* Implementation-defined VFP subarchitecture ID for the ARM926. */
19 env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
21 case ARM_CPUID_ARM1026:
22 set_feature(env, ARM_FEATURE_VFP);
/* ARM1026 additionally has an Auxiliary Control register (cp15 c1, op2=1). */
23 set_feature(env, ARM_FEATURE_AUXCR);
24 env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
27 cpu_abort(env, "Bad CPU ID: %x\n", id);
/* Reset the CPU to its architectural reset state.  The CPUID is saved
   across the memset and re-applied so model-specific state (features,
   FPSID) is restored by cpu_reset_model_id(). */
32 void cpu_reset(CPUARMState *env)
35 id = env->cp15.c0_cpuid;
/* Only clear fields up to `breakpoints`; everything at or after that
   offset in CPUARMState deliberately survives reset. */
36 memset(env, 0, offsetof(CPUARMState, breakpoints));
38 cpu_reset_model_id(env, id);
39 #if defined (CONFIG_USER_ONLY)
/* User-mode emulation: start in USR mode with VFP enabled
   (bit 30 of FPEXC is the enable bit). */
40 env->uncached_cpsr = ARM_CPU_MODE_USR;
41 env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
43 /* SVC mode with interrupts disabled. */
44 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
/* VFP stays disabled until the guest enables it via FPEXC. */
45 env->vfp.xregs[ARM_VFP_FPEXC] = 0;
/* Allocate and initialise a new, zeroed CPU state.
   NOTE(review): only the allocation is visible in this excerpt; the
   remaining initialisation (and NULL-check policy of qemu_mallocz)
   lies outside the visible lines. */
51 CPUARMState *cpu_arm_init(void)
55 env = qemu_mallocz(sizeof(CPUARMState));
/* Table mapping CPUID values to user-visible model names, used by
   arm_cpu_list() and cpu_arm_set_model().  Presumably terminated by a
   NULL-name sentinel entry outside the visible lines (both loops below
   test arm_cpu_names[i].name) — TODO confirm. */
68 static const struct arm_cpu_t arm_cpu_names[] = {
69 { ARM_CPUID_ARM926, "arm926"},
70 { ARM_CPUID_ARM1026, "arm1026"},
/* Print the names of all supported CPU models to stdout
   (used for e.g. "-cpu ?" style listings). */
74 void arm_cpu_list(void)
78 printf ("Available CPUs:\n");
79 for (i = 0; arm_cpu_names[i].name; i++) {
80 printf(" %s\n", arm_cpu_names[i].name);
/* Select a CPU model by name: look the name up in arm_cpu_names[],
   abort emulation if it is unknown, otherwise apply the model's
   reset state via cpu_reset_model_id(). */
84 void cpu_arm_set_model(CPUARMState *env, const char *name)
91 for (i = 0; arm_cpu_names[i].name; i++) {
92 if (strcmp(name, arm_cpu_names[i].name) == 0) {
93 id = arm_cpu_names[i].id;
98 cpu_abort(env, "Unknown CPU '%s'", name);
101 cpu_reset_model_id(env, id);
/* Free a CPU state previously allocated by cpu_arm_init().
   (Body not visible in this excerpt.) */
104 void cpu_arm_close(CPUARMState *env)
109 #if defined(CONFIG_USER_ONLY)
/* User-mode-only exception hook: real exception delivery is handled by
   the usermode emulation layer, so just clear the pending exception
   (-1 means "no exception"). */
111 void do_interrupt (CPUState *env)
113 env->exception_index = -1;
/* User-mode-only MMU fault handler: there is no MMU, so any fault is
   reported straight back to the guest.  Records the faulting address
   in the appropriate cp15 fault-address register and raises a prefetch
   abort for instruction fetches or a data abort otherwise.
   NOTE(review): the return value and the fetch/data discrimination test
   are outside the visible lines. */
116 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
117 int is_user, int is_softmmu)
120 env->exception_index = EXCP_PREFETCH_ABORT;
121 env->cp15.c6_insn = address;
123 env->exception_index = EXCP_DATA_ABORT;
124 env->cp15.c6_data = address;
/* Debugger physical-address lookup for user-mode emulation.
   (Body not visible; with no MMU this is presumably an identity
   mapping — TODO confirm.) */
129 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
134 /* These should probably raise undefined insn exceptions. */
/* cp15 is inaccessible from user mode, so any write reaching here is a
   guest bug (or missing emulation): abort with the offending insn word. */
135 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
137 cpu_abort(env, "cp15 insn %08x\n", insn);
/* cp15 reads are likewise impossible from user mode; see
   helper_set_cp15 above.  (Return value unreachable after cpu_abort.) */
140 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
142 cpu_abort(env, "cp15 insn %08x\n", insn);
/* User-mode emulation runs only in USR mode; any attempt to switch to a
   privileged mode indicates broken emulation, so abort. */
146 void switch_mode(CPUState *env, int mode)
148 if (mode != ARM_CPU_MODE_USR)
149 cpu_abort(env, "Tried to switch out of user mode\n");
154 extern int semihosting_enabled;
156 /* Map CPU modes onto saved register banks. */
/* Returns the index into banked_r13/banked_r14/banked_spsr for a CPSR
   mode value.  USR and SYS share a bank (same r13/r14 architecturally).
   NOTE(review): the actual return constants per case are on lines not
   visible in this excerpt — presumably sequential small indices. */
157 static inline int bank_number (int mode)
160 case ARM_CPU_MODE_USR:
161 case ARM_CPU_MODE_SYS:
163 case ARM_CPU_MODE_SVC:
165 case ARM_CPU_MODE_ABT:
167 case ARM_CPU_MODE_UND:
169 case ARM_CPU_MODE_IRQ:
171 case ARM_CPU_MODE_FIQ:
/* Unknown mode value: cannot continue; uses cpu_single_env because the
   env pointer is not passed in. */
174 cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
/* Switch the CPU between processor modes, swapping the banked
   registers in and out of the flat env->regs[] array.  Does nothing if
   the target mode equals the current CPSR mode. */
178 void switch_mode(CPUState *env, int mode)
183 old_mode = env->uncached_cpsr & CPSR_M;
184 if (mode == old_mode)
/* FIQ banks r8-r12 (5 registers) in addition to r13/r14, so moving
   into or out of FIQ swaps that whole range with the saved copies. */
187 if (old_mode == ARM_CPU_MODE_FIQ) {
188 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
189 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
190 } else if (mode == ARM_CPU_MODE_FIQ) {
191 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
192 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
/* Every mode banks r13 (SP), r14 (LR) and SPSR: save the outgoing
   mode's copies, then load the incoming mode's. */
195 i = bank_number(old_mode);
196 env->banked_r13[i] = env->regs[13];
197 env->banked_r14[i] = env->regs[14];
198 env->banked_spsr[i] = env->spsr;
200 i = bank_number(mode);
201 env->regs[13] = env->banked_r13[i];
202 env->regs[14] = env->banked_r14[i];
203 env->spsr = env->banked_spsr[i];
206 /* Handle a CPU exception. */
/* Performs ARM exception entry: picks the target mode and the CPSR
   interrupt-mask bits for env->exception_index, saves CPSR into the new
   mode's SPSR, switches mode, sets LR to the return address and jumps
   to the vector.  SWIs may instead be intercepted as semihosting calls. */
207 void do_interrupt(CPUARMState *env)
214 /* TODO: Vectored interrupt controller. */
215 switch (env->exception_index) {
217 new_mode = ARM_CPU_MODE_UND;
226 if (semihosting_enabled) {
227 /* Check for semihosting interrupt. */
/* Re-read the SWI immediate from the instruction just executed:
   Thumb SWI has an 8-bit immediate, ARM a 24-bit one. */
229 mask = lduw_code(env->regs[15] - 2) & 0xff;
231 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
233 /* Only intercept calls from privileged modes, to provide some
234 semblance of security. */
/* 0x123456 (ARM) / 0xab (Thumb) are the standard ARM semihosting
   SWI numbers; the result is returned to the guest in r0. */
235 if (((mask == 0x123456 && !env->thumb)
236 || (mask == 0xab && env->thumb))
237 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
238 env->regs[0] = do_arm_semihosting(env);
242 new_mode = ARM_CPU_MODE_SVC;
245 /* The PC already points to the next instruction. */
248 case EXCP_PREFETCH_ABORT:
250 new_mode = ARM_CPU_MODE_ABT;
252 mask = CPSR_A | CPSR_I;
255 case EXCP_DATA_ABORT:
256 new_mode = ARM_CPU_MODE_ABT;
258 mask = CPSR_A | CPSR_I;
262 new_mode = ARM_CPU_MODE_IRQ;
264 /* Disable IRQ and imprecise data aborts. */
265 mask = CPSR_A | CPSR_I;
269 new_mode = ARM_CPU_MODE_FIQ;
271 /* Disable FIQ, IRQ and imprecise data aborts. */
272 mask = CPSR_A | CPSR_I | CPSR_F;
276 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
277 return; /* Never happens. Keep compiler happy. */
/* Bit 13 of the cp15 control register is the V (high vectors) bit;
   when set, vectors are presumably relocated to 0xffff0000 on the
   lines not visible here — TODO confirm. */
280 if (env->cp15.c1_sys & (1 << 13)) {
283 switch_mode (env, new_mode);
284 env->spsr = cpsr_read(env);
285 /* Switch to the new mode, and switch to Arm mode. */
286 /* ??? Thumb interrupt handlers not implemented. */
287 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
288 env->uncached_cpsr |= mask;
/* `offset` and `addr` are set per-exception on lines outside this
   excerpt: LR gets the mode-specific return-address offset, PC the
   vector address. */
290 env->regs[14] = env->regs[15] + offset;
291 env->regs[15] = addr;
/* Force the translator to stop chaining TBs, since we just changed
   mode/PC behind its back. */
292 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
295 /* Check section/page access permissions.
296 Returns the page protection flags, or zero if the access is not
/* Decodes the 2-bit AP field together with the S/R bits
   ((c1_sys >> 8) & 3) and the privilege level.  From the ap==2 case
   below, access_type == 1 appears to mean a write access — the
   access_type test's taken branch is on a line outside this excerpt. */
298 static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
302 return PAGE_READ | PAGE_WRITE;
306 if (access_type == 1)
308 switch ((env->cp15.c1_sys >> 8) & 3) {
310 return is_user ? 0 : PAGE_READ;
/* ap == 1: privileged read/write, user no access. */
317 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
/* ap == 2: read-only for user; writes fault. */
320 return (access_type == 1) ? 0 : PAGE_READ;
322 return PAGE_READ | PAGE_WRITE;
324 return PAGE_READ | PAGE_WRITE;
/* Translate a virtual address via the (ARMv4/v5-style) MMU.
   On success stores the physical address in *phys_ptr and the
   PAGE_READ/PAGE_WRITE permissions in *prot and returns 0; on a fault
   returns the fault-status code with the domain in bits [7:4]. */
330 static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
331 int is_user, uint32_t *phys_ptr, int *prot)
341 /* Fast Context Switch Extension. */
/* FCSE: virtual addresses below 32MB are relocated by the cp15 c13
   FCSE PID before translation. */
342 if (address < 0x02000000)
343 address += env->cp15.c13_fcse;
/* Bit 0 of the control register is the MMU enable bit; with the MMU
   off, addresses map flat with full access. */
345 if ((env->cp15.c1_sys & 1) == 0) {
348 *prot = PAGE_READ | PAGE_WRITE;
350 /* Pagetable walk. */
351 /* Lookup l1 descriptor. */
/* c2 is the translation table base; index by the top 12 VA bits. */
352 table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
353 desc = ldl_phys(table);
/* Domain access field: 2 bits per domain from the c3 DACR, selected
   by the descriptor's domain number. */
355 domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
357 /* Section translation fault. */
/* Domain 0 = no access, 2 = reserved: both fault here; domain 1
   (client) goes on to the AP check, 3 (manager) skips it. */
361 if (domain == 0 || domain == 2) {
363 code = 9; /* Section domain fault. */
365 code = 11; /* Page domain fault. */
/* 1MB section: physical address is the section base plus VA offset. */
370 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
371 ap = (desc >> 10) & 3;
374 /* Lookup l2 entry. */
375 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
376 desc = ldl_phys(table);
378 case 0: /* Page translation fault. */
381 case 1: /* 64k page. */
382 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
/* Subpage AP field selected by VA bits [15:14] (16KB subpages). */
383 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
385 case 2: /* 4k page. */
386 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
/* NOTE(review): for 4KB small pages the 1KB subpage should be
   selected by VA bits [11:10] (i.e. (address >> 9) & 6); reusing the
   64KB-page shift here looks suspicious — verify against the ARM ARM. */
387 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
389 case 3: /* 1k page. */
391 /* Page translation fault. */
395 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
396 ap = (desc >> 4) & 3;
399 /* Never happens, but compiler isn't smart enough to tell. */
404 *prot = check_ap(env, ap, domain, access_type, is_user);
406 /* Access permission fault. */
409 *phys_ptr = phys_addr;
/* Fault status register format: code in bits [3:0], domain [7:4]. */
413 return code | (domain << 4);
/* softmmu TLB-miss handler: translate the address and either install a
   TLB entry or record fault state and raise the appropriate abort.
   access_type == 2 is an instruction fetch (fills c5/c6 _insn); other
   values are data accesses (c5/c6 _data). */
416 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
417 int access_type, int is_user, int is_softmmu)
423 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
425 /* Map a single [sub]page. */
/* Round both addresses down to the smallest mapping unit (1KB tiny
   page), so one TLB entry never spans differing permissions. */
426 phys_addr &= ~(uint32_t)0x3ff;
427 address &= ~(uint32_t)0x3ff;
428 return tlb_set_page (env, address, phys_addr, prot, is_user,
/* Translation failed: latch the FSR code and faulting address into the
   cp15 fault registers, then signal the abort exception. */
432 if (access_type == 2) {
433 env->cp15.c5_insn = ret;
434 env->cp15.c6_insn = address;
435 env->exception_index = EXCP_PREFETCH_ABORT;
437 env->cp15.c5_data = ret;
438 env->cp15.c6_data = address;
439 env->exception_index = EXCP_DATA_ABORT;
/* Debugger address translation: walk the page tables as a privileged
   read (access_type 0, is_user 0) without touching fault state.
   (The handling of `ret` lies outside the visible lines.) */
444 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
450 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
/* Emulate an MCR write to cp15.  The target register is selected by
   CRn (insn bits [19:16]) with op2 (bits [7:5]) disambiguating within
   a group.  Writes that change translation state flush the TLB. */
458 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
462 op2 = (insn >> 5) & 7;
463 switch ((insn >> 16) & 0xf) {
464 case 0: /* ID codes. */
466 case 1: /* System configuration. */
469 env->cp15.c1_sys = val;
470 /* ??? Lots of these bits are not implemented. */
471 /* This may enable/disable the MMU, so do a TLB flush. */
475 env->cp15.c1_coproc = val;
476 /* ??? Is this safe when called from within a TB? */
482 case 2: /* MMU Page table control. */
485 case 3: /* MMU Domain access control. */
488 case 4: /* Reserved. */
490 case 5: /* MMU Fault status. */
493 env->cp15.c5_data = val;
496 env->cp15.c5_insn = val;
502 case 6: /* MMU Fault address. */
505 env->cp15.c6_data = val;
508 env->cp15.c6_insn = val;
514 case 7: /* Cache control. */
515 /* No cache, so nothing to do. */
517 case 8: /* MMU TLB control. */
519 case 0: /* Invalidate all. */
522 case 1: /* Invalidate single TLB entry. */
524 /* ??? This is wrong for large pages and sections. */
525 /* As an ugly hack to make linux work we always flush a 4K
/* Flush all four 1KB subpages of the containing 4KB page, since qemu's
   TLB granularity here is 1KB (see the masking in the MMU fault path). */
528 tlb_flush_page(env, val);
529 tlb_flush_page(env, val + 0x400);
530 tlb_flush_page(env, val + 0x800);
531 tlb_flush_page(env, val + 0xc00);
540 case 9: /* Cache lockdown. */
543 env->cp15.c9_data = val;
546 env->cp15.c9_insn = val;
552 case 10: /* MMU TLB lockdown. */
553 /* ??? TLB lockdown not implemented. */
555 case 11: /* TCM DMA control. */
556 case 12: /* Reserved. */
558 case 13: /* Process ID. */
561 /* Unlike real hardware the qemu TLB uses virtual addresses,
562 not modified virtual addresses, so this causes a TLB flush.
/* Only flush when the FCSE PID actually changes. */
564 if (env->cp15.c13_fcse != val)
566 env->cp15.c13_fcse = val;
569 /* This changes the ASID, so do a TLB flush. */
570 if (env->cp15.c13_context != val)
572 env->cp15.c13_context = val;
578 case 14: /* Reserved. */
580 case 15: /* Implementation specific. */
581 /* ??? Internal registers not implemented. */
586 /* ??? For debugging only. Should raise illegal instruction exception. */
/* NOTE(review): this is the *write* accessor but the abort message says
   "register read" — looks like a copy-paste from helper_get_cp15;
   the message string should presumably say "write". */
587 cpu_abort(env, "Unimplemented cp15 register read\n");
/* Emulate an MRC read from cp15.  Register selection mirrors
   helper_set_cp15: CRn from insn bits [19:16], op2 from bits [7:5].
   Returns the register value, or aborts on an unimplemented one. */
590 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
594 op2 = (insn >> 5) & 7;
595 switch ((insn >> 16) & 0xf) {
596 case 0: /* ID codes. */
598 default: /* Device ID. */
599 return env->cp15.c0_cpuid;
600 case 1: /* Cache Type. */
602 case 2: /* TCM status. */
605 case 1: /* System configuration. */
607 case 0: /* Control register. */
608 return env->cp15.c1_sys;
609 case 1: /* Auxiliary control register. */
/* Only models with ARM_FEATURE_AUXCR (e.g. ARM1026) expose this;
   the value returned is on a line outside the visible excerpt. */
610 if (arm_feature(env, ARM_FEATURE_AUXCR))
613 case 2: /* Coprocessor access register. */
614 return env->cp15.c1_coproc;
618 case 2: /* MMU Page table control. */
620 case 3: /* MMU Domain access control. */
622 case 4: /* Reserved. */
624 case 5: /* MMU Fault status. */
627 return env->cp15.c5_data;
629 return env->cp15.c5_insn;
633 case 6: /* MMU Fault address. */
636 return env->cp15.c6_data;
638 /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't
640 return env->cp15.c6_insn;
644 case 7: /* Cache control. */
645 /* ??? This is for test, clean and invalidate operations that set the
646 Z flag. We can't represent N = Z = 1, so it also clears
647 the N flag. Oh well. */
650 case 8: /* MMU TLB control. */
652 case 9: /* Cache lockdown. */
655 return env->cp15.c9_data;
657 return env->cp15.c9_insn;
661 case 10: /* MMU TLB lockdown. */
662 /* ??? TLB lockdown not implemented. */
664 case 11: /* TCM DMA control. */
665 case 12: /* Reserved. */
667 case 13: /* Process ID. */
670 return env->cp15.c13_fcse;
672 return env->cp15.c13_context;
676 case 14: /* Reserved. */
678 case 15: /* Implementation specific. */
679 /* ??? Internal registers not implemented. */
683 /* ??? For debugging only. Should raise illegal instruction exception. */
684 cpu_abort(env, "Unimplemented cp15 register read\n");