8 static inline void set_feature(CPUARMState *env, int feature)
10 env->features |= 1u << feature;
/* Install the model-specific reset state for CPU `id`: feature bits,
 * VFP/cp15 reset values and the cache-type register.  The visible lines
 * are the arms of a switch on `id`; braces, `break;` statements and the
 * switch header are elided in this view.  */
13 static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
15 env->cp15.c0_cpuid = id;
/* ARM926EJ-S: VFP present, ARMv5 cache type and control-register reset.  */
17 case ARM_CPUID_ARM926:
18 set_feature(env, ARM_FEATURE_VFP);
19 env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
20 env->cp15.c0_cachetype = 0x1dd20d2;
21 env->cp15.c1_sys = 0x00090078;
/* ARM946E-S: no MMU, uses the MPU (protection unit) instead.  */
23 case ARM_CPUID_ARM946:
24 set_feature(env, ARM_FEATURE_MPU);
25 env->cp15.c0_cachetype = 0x0f004006;
26 env->cp15.c1_sys = 0x00000078;
/* ARM1026EJ-S: VFP plus an auxiliary control register.  */
28 case ARM_CPUID_ARM1026:
29 set_feature(env, ARM_FEATURE_VFP);
30 set_feature(env, ARM_FEATURE_AUXCR);
31 env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
32 env->cp15.c0_cachetype = 0x1dd20d2;
33 env->cp15.c1_sys = 0x00090078;
/* TI915T/TI925T (OMAP): both report the TI925T id; the effective id
   can later be flipped by the OS_TYPE bit in helper_set_cp15 case 15.  */
35 case ARM_CPUID_TI915T:
36 case ARM_CPUID_TI925T:
37 set_feature(env, ARM_FEATURE_OMAPCP);
38 env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */
39 env->cp15.c0_cachetype = 0x5109149;
40 env->cp15.c1_sys = 0x00000070;
41 env->cp15.c15_i_max = 0x000;
42 env->cp15.c15_i_min = 0xff0;
/* PXA25x/26x: XScale core without iwMMXt.  */
44 case ARM_CPUID_PXA250:
45 case ARM_CPUID_PXA255:
46 case ARM_CPUID_PXA260:
47 case ARM_CPUID_PXA261:
48 case ARM_CPUID_PXA262:
49 set_feature(env, ARM_FEATURE_XSCALE);
50 /* JTAG_ID is ((id << 28) | 0x09265013) */
51 env->cp15.c0_cachetype = 0xd172172;
52 env->cp15.c1_sys = 0x00000078;
/* PXA270 steppings: XScale with the iwMMXt media extension.  */
54 case ARM_CPUID_PXA270_A0:
55 case ARM_CPUID_PXA270_A1:
56 case ARM_CPUID_PXA270_B0:
57 case ARM_CPUID_PXA270_B1:
58 case ARM_CPUID_PXA270_C0:
59 case ARM_CPUID_PXA270_C5:
60 set_feature(env, ARM_FEATURE_XSCALE);
61 /* JTAG_ID is ((id << 28) | 0x09265013) */
62 set_feature(env, ARM_FEATURE_IWMMXT);
63 env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
64 env->cp15.c0_cachetype = 0xd172172;
65 env->cp15.c1_sys = 0x00000078;
/* default: unknown id is a fatal configuration error.  */
68 cpu_abort(env, "Bad CPU ID: %x\n", id);
/* Full CPU reset: clear the dynamic CPU state, then re-apply the
 * model-specific reset values and the reset CPSR/FPEXC.  */
73 void cpu_reset(CPUARMState *env)
/* Save the CPU id first: the memset below wipes cp15.c0_cpuid.  */
76 id = env->cp15.c0_cpuid;
/* Clear everything up to (not including) the debug/breakpoint state.  */
77 memset(env, 0, offsetof(CPUARMState, breakpoints));
79 cpu_reset_model_id(env, id);
80 #if defined (CONFIG_USER_ONLY)
/* User-mode emulation starts in USR mode with VFP already enabled.  */
81 env->uncached_cpsr = ARM_CPU_MODE_USR;
82 env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
/* System emulation (#else branch; directive elided in this view):  */
84 /* SVC mode with interrupts disabled. */
85 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
86 env->vfp.xregs[ARM_VFP_FPEXC] = 0;
/* Allocate and zero-initialise a new ARM CPU state.  The remainder of
 * the body (exec init, reset, return) is elided in this view.  */
92 CPUARMState *cpu_arm_init(void)
96 env = qemu_mallocz(sizeof(CPUARMState));
/* Table mapping user-visible CPU model names (e.g. for -cpu) to CPUID
 * values.  The struct declaration and the NULL-name terminator entry
 * are outside this view; iteration stops on a NULL name.  */
109 static const struct arm_cpu_t arm_cpu_names[] = {
110 { ARM_CPUID_ARM926, "arm926"},
111 { ARM_CPUID_ARM946, "arm946"},
112 { ARM_CPUID_ARM1026, "arm1026"},
113 { ARM_CPUID_TI925T, "ti925t" },
114 { ARM_CPUID_PXA250, "pxa250" },
115 { ARM_CPUID_PXA255, "pxa255" },
116 { ARM_CPUID_PXA260, "pxa260" },
117 { ARM_CPUID_PXA261, "pxa261" },
118 { ARM_CPUID_PXA262, "pxa262" },
/* "pxa270" is an alias; the per-stepping names follow.  */
119 { ARM_CPUID_PXA270, "pxa270" },
120 { ARM_CPUID_PXA270_A0, "pxa270-a0" },
121 { ARM_CPUID_PXA270_A1, "pxa270-a1" },
122 { ARM_CPUID_PXA270_B0, "pxa270-b0" },
123 { ARM_CPUID_PXA270_B1, "pxa270-b1" },
124 { ARM_CPUID_PXA270_C0, "pxa270-c0" },
125 { ARM_CPUID_PXA270_C5, "pxa270-c5" },
/* Print the list of supported CPU model names to `f` using the supplied
 * fprintf-like callback (used by the monitor and -cpu ? handling).  */
129 void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
133 (*cpu_fprintf)(f, "Available CPUs:\n");
134 for (i = 0; arm_cpu_names[i].name; i++) {
135 (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name);
/* Select a CPU model by name: look the name up in arm_cpu_names and
 * apply its reset state.  Aborts on an unknown name.  */
139 void cpu_arm_set_model(CPUARMState *env, const char *name)
146 for (i = 0; arm_cpu_names[i].name; i++) {
147 if (strcmp(name, arm_cpu_names[i].name) == 0) {
148 id = arm_cpu_names[i].id;
/* Reached only when the loop found no match (break elided above).  */
153 cpu_abort(env, "Unknown CPU '%s'", name);
156 cpu_reset_model_id(env, id);
/* Destroy a CPU state created by cpu_arm_init().  Body elided in this
 * view; presumably frees `env` — TODO confirm against full source.  */
159 void cpu_arm_close(CPUARMState *env)
164 #if defined(CONFIG_USER_ONLY)
166 void do_interrupt (CPUState *env)
168 env->exception_index = -1;
/* User-mode fault handler: there is no MMU, so every fault is reported
 * back to the caller as a prefetch or data abort.  The `rw == 2`
 * (instruction fetch) test and the `return 1;` are elided here.  */
171 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
172                               int mmu_idx, int is_softmmu)
/* Instruction-fetch fault path:  */
175 env->exception_index = EXCP_PREFETCH_ABORT;
176 env->cp15.c6_insn = address;
/* Data-access fault path:  */
178 env->exception_index = EXCP_DATA_ABORT;
179 env->cp15.c6_data = address;
/* User-mode: addresses are not translated, so the debug physical
 * address equals the virtual one (return elided in this view).  */
184 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
189 /* These should probably raise undefined insn exceptions. */
190 void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
192 int op1 = (insn >> 8) & 0xf;
193 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
197 uint32_t helper_get_cp(CPUState *env, uint32_t insn)
199 int op1 = (insn >> 8) & 0xf;
200 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
204 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
206 cpu_abort(env, "cp15 insn %08x\n", insn);
209 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
211 cpu_abort(env, "cp15 insn %08x\n", insn);
215 void switch_mode(CPUState *env, int mode)
217 if (mode != ARM_CPU_MODE_USR)
218 cpu_abort(env, "Tried to switch out of user mode\n");
223 extern int semihosting_enabled;
225 /* Map CPU modes onto saved register banks. */
/* Returns a small bank index per processor mode; USR and SYS share a
 * bank because SYS uses the user registers.  The `switch (mode)`
 * header and the per-case return statements are elided in this view.  */
226 static inline int bank_number (int mode)
229 case ARM_CPU_MODE_USR:
230 case ARM_CPU_MODE_SYS:
232 case ARM_CPU_MODE_SVC:
234 case ARM_CPU_MODE_ABT:
236 case ARM_CPU_MODE_UND:
238 case ARM_CPU_MODE_IRQ:
240 case ARM_CPU_MODE_FIQ:
/* default: an unknown mode value is fatal.  */
243 cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
/* System emulation: switch the CPU to `mode`, spilling and reloading
 * the banked r13/r14/SPSR and (for FIQ transitions) r8-r12.  Local
 * declarations and the early-return body are elided in this view.  */
247 void switch_mode(CPUState *env, int mode)
252 old_mode = env->uncached_cpsr & CPSR_M;
/* Nothing to do when already in the requested mode.  */
253 if (mode == old_mode)
/* r8-r12 are banked only for FIQ: swap them when entering or leaving
 * FIQ mode.  */
256 if (old_mode == ARM_CPU_MODE_FIQ) {
257 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
258 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
259 } else if (mode == ARM_CPU_MODE_FIQ) {
260 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
261 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
/* Save the outgoing mode's sp/lr/SPSR into its bank ...  */
264 i = bank_number(old_mode);
265 env->banked_r13[i] = env->regs[13];
266 env->banked_r14[i] = env->regs[14];
267 env->banked_spsr[i] = env->spsr;
/* ... and load the incoming mode's bank.  */
269 i = bank_number(mode);
270 env->regs[13] = env->banked_r13[i];
271 env->regs[14] = env->banked_r14[i];
272 env->spsr = env->banked_spsr[i];
275 /* Handle a CPU exception. */
/* System emulation: take the pending exception in env->exception_index.
 * Chooses the target mode, CPSR mask bits, vector address and LR offset
 * (local declarations and several case bodies are elided in this view),
 * then performs the mode switch and jumps to the vector.  */
276 void do_interrupt(CPUARMState *env)
283 /* TODO: Vectored interrupt controller. */
284 switch (env->exception_index) {
/* EXCP_UDEF path:  */
286 new_mode = ARM_CPU_MODE_UND;
/* EXCP_SWI path: may be intercepted as a semihosting call.  */
295 if (semihosting_enabled) {
296 /* Check for semihosting interrupt. */
/* Thumb: SVC immediate is the low byte of the 16-bit insn just
 * executed; ARM: low 24 bits of the 32-bit insn.  */
298 mask = lduw_code(env->regs[15] - 2) & 0xff;
300 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
302 /* Only intercept calls from privileged modes, to provide some
303 semblance of security. */
304 if (((mask == 0x123456 && !env->thumb)
305 || (mask == 0xab && env->thumb))
306 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
307 env->regs[0] = do_arm_semihosting(env);
311 new_mode = ARM_CPU_MODE_SVC;
314 /* The PC already points to the next instruction. */
317 case EXCP_PREFETCH_ABORT:
319 new_mode = ARM_CPU_MODE_ABT;
321 mask = CPSR_A | CPSR_I;
324 case EXCP_DATA_ABORT:
325 new_mode = ARM_CPU_MODE_ABT;
327 mask = CPSR_A | CPSR_I;
/* EXCP_IRQ path:  */
331 new_mode = ARM_CPU_MODE_IRQ;
333 /* Disable IRQ and imprecise data aborts. */
334 mask = CPSR_A | CPSR_I;
/* EXCP_FIQ path:  */
338 new_mode = ARM_CPU_MODE_FIQ;
340 /* Disable FIQ, IRQ and imprecise data aborts. */
341 mask = CPSR_A | CPSR_I | CPSR_F;
/* default:  */
345 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
346 return; /* Never happens. Keep compiler happy. */
/* SCTLR bit 13 (V) selects the high vector base 0xffff0000.  */
349 if (env->cp15.c1_sys & (1 << 13)) {
352 switch_mode (env, new_mode);
/* Save the pre-exception CPSR into the new mode's SPSR.  */
353 env->spsr = cpsr_read(env);
354 /* Switch to the new mode, and switch to Arm mode. */
355 /* ??? Thumb interrupt handlers not implemented. */
356 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
357 env->uncached_cpsr |= mask;
/* LR = return address (PC plus the per-exception offset), then jump
 * to the vector.  */
359 env->regs[14] = env->regs[15] + offset;
360 env->regs[15] = addr;
/* Force a TB exit so execution restarts at the vector.  */
361 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
364 /* Check section/page access permissions.
365 Returns the page protection flags, or zero if the access is not
/* (continuation: "...permitted" — line elided).  `ap` is the 2-bit AP
 * field from the descriptor; `access_type` 1 means a write; the domain
 * check and the AP case labels are elided in this view.  */
367 static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
/* Manager domain: all accesses allowed.  */
371 return PAGE_READ | PAGE_WRITE;
375 if (access_type == 1)
/* AP == 0: behaviour depends on SCTLR S/R bits (bits 8-9).  */
377 switch ((env->cp15.c1_sys >> 8) & 3) {
379 return is_user ? 0 : PAGE_READ;
/* AP == 1: privileged read/write only.  */
386 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
/* AP == 2 (user): read-only; writes fault.  */
389 return (access_type == 1) ? 0 : PAGE_READ;
391 return PAGE_READ | PAGE_WRITE;
/* AP == 3: full access.  */
393 return PAGE_READ | PAGE_WRITE;
/* Translate `address` to a physical address, handling the disabled-MMU,
 * MPU and pagetable-walk cases.  On success stores the physical address
 * and protection bits through the out-pointers; on failure returns an
 * ARM fault status code combined with the domain (see final return).
 * Local declarations, braces and several branches are elided here.  */
399 static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
400                          int is_user, uint32_t *phys_ptr, int *prot)
410 /* Fast Context Switch Extension. */
/* Low 32MB addresses are relocated by the FCSE PID register.  */
411 if (address < 0x02000000)
412 address += env->cp15.c13_fcse;
/* SCTLR bit 0 (M) enables the MMU/MPU.  */
414 if ((env->cp15.c1_sys & 1) == 0) {
415 /* MMU/MPU disabled. */
417 *prot = PAGE_READ | PAGE_WRITE;
418 } else if (arm_feature(env, ARM_FEATURE_MPU)) {
/* MPU: search regions from highest priority (7) down for a match.  */
424 for (n = 7; n >= 0; n--) {
425 base = env->cp15.c6_region[n];
/* Region size is encoded in bits [5:1] of the base register.  */
428 mask = 1 << ((base >> 1) & 0x1f);
429 /* Keep this shift separate from the above to avoid an
430 (undefined) << 32. */
431 mask = (mask << 1) - 1;
432 if (((base ^ address) & ~mask) == 0)
/* Extract the matching region's 4-bit access permissions from the
 * instruction or data permission register.  */
438 if (access_type == 2) {
439 mask = env->cp15.c5_insn;
441 mask = env->cp15.c5_data;
443 mask = (mask >> (n * 4)) & 0xf;
450 *prot = PAGE_READ | PAGE_WRITE;
458 *prot = PAGE_READ | PAGE_WRITE;
469 /* Bad permission. */
473 /* Pagetable walk. */
474 /* Lookup l1 descriptor. */
475 table = (env->cp15.c2_base & 0xffffc000) | ((address >> 18) & 0x3ffc);
476 desc = ldl_phys(table);
/* Domain number is desc bits [8:5]; look up its 2-bit state in DACR.  */
478 domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
480 /* Section translation fault. */
/* Domain 0 = no access, 2 = reserved: raise a domain fault.  */
484 if (domain == 0 || domain == 2) {
486 code = 9; /* Section domain fault. */
488 code = 11; /* Page domain fault. */
/* 1MB section: physical address and AP come from the L1 descriptor.  */
493 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
494 ap = (desc >> 10) & 3;
497 /* Lookup l2 entry. */
499 /* Coarse pagetable. */
500 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
502 /* Fine pagetable. */
503 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
505 desc = ldl_phys(table);
507 case 0: /* Page translation fault. */
510 case 1: /* 64k page. */
511 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
/* AP is selected per 16KB subpage.  */
512 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
514 case 2: /* 4k page. */
515 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
516 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
518 case 3: /* 1k page. */
/* XScale treats type 3 as an extended 4k page.  */
520 if (arm_feature(env, ARM_FEATURE_XSCALE))
521 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
523 /* Page translation fault. */
528 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
529 ap = (desc >> 4) & 3;
532 /* Never happens, but compiler isn't smart enough to tell. */
537 *prot = check_ap(env, ap, domain, access_type, is_user);
539 /* Access permission fault. */
542 *phys_ptr = phys_addr;
/* Fault status register format: FS code in the low bits, domain in
 * bits [7:4].  */
546 return code | (domain << 4);
/* System-mode TLB fill: translate the faulting address and either map
 * the page into the QEMU TLB or record the fault in cp15 c5/c6 and
 * raise the appropriate abort.  Local declarations and the success
 * test on `ret` are elided in this view.  */
549 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
550                               int access_type, int mmu_idx, int is_softmmu)
556 is_user = mmu_idx == MMU_USER_IDX;
557 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
559 /* Map a single [sub]page. */
/* Align both addresses to the 1KB minimum mapping granule.  */
560 phys_addr &= ~(uint32_t)0x3ff;
561 address &= ~(uint32_t)0x3ff;
562 return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
/* Translation failed: record fault status/address and pick the abort
 * type (access_type 2 = instruction fetch).  */
566 if (access_type == 2) {
567 env->cp15.c5_insn = ret;
568 env->cp15.c6_insn = address;
569 env->exception_index = EXCP_PREFETCH_ABORT;
571 env->cp15.c5_data = ret;
572 env->cp15.c6_data = address;
573 env->exception_index = EXCP_DATA_ABORT;
/* Debugger address translation: walk the tables as a privileged read
 * (access_type 0, is_user 0).  The error check and return are elided
 * in this view.  */
578 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
584 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
592 void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
594 int cp_num = (insn >> 8) & 0xf;
595 int cp_info = (insn >> 5) & 7;
596 int src = (insn >> 16) & 0xf;
597 int operand = insn & 0xf;
599 if (env->cp[cp_num].cp_write)
600 env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
601 cp_info, src, operand, val);
604 uint32_t helper_get_cp(CPUState *env, uint32_t insn)
606 int cp_num = (insn >> 8) & 0xf;
607 int cp_info = (insn >> 5) & 7;
608 int dest = (insn >> 16) & 0xf;
609 int operand = insn & 0xf;
611 if (env->cp[cp_num].cp_read)
612 return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
613 cp_info, dest, operand);
/* Return basic MPU access permission bits: compress the extended
   format (one 4-bit field per region, 8 regions) down to the simple
   format (one 2-bit field per region) by keeping the low two bits of
   each field.  Inverse of extended_mpu_ap_bits for valid values.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t packed = 0;
    int region;

    for (region = 0; region < 8; region++)
        packed |= ((val >> (region * 4)) & 3) << (region * 2);
    return packed;
}
/* Pad basic MPU access permission bits to extended format: expand each
   region's 2-bit field into the low two bits of a 4-bit field (upper
   two bits zero), for 8 regions.  Inverse of simple_mpu_ap_bits.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t padded = 0;
    int region;

    for (region = 0; region < 8; region++)
        padded |= ((val >> (region * 2)) & 3) << (region * 4);
    return padded;
}
/* System-mode cp15 write.  Dispatches on CRn (insn bits [19:16]) with
 * sub-dispatch on opcode2/CRm.  Most switch/case scaffolding, `break`
 * statements and the shared `bad_reg:` label are elided in this view;
 * unimplemented registers fall through to the cpu_abort at the end.  */
647 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
652 op2 = (insn >> 5) & 7;
654 switch ((insn >> 16) & 0xf) {
655 case 0: /* ID codes. */
/* ID registers are read-only except for XScale/OMAP quirks.  */
656 if (arm_feature(env, ARM_FEATURE_XSCALE))
658 if (arm_feature(env, ARM_FEATURE_OMAPCP))
661 case 1: /* System configuration. */
662 if (arm_feature(env, ARM_FEATURE_OMAPCP))
666 if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
667 env->cp15.c1_sys = val;
668 /* ??? Lots of these bits are not implemented. */
669 /* This may enable/disable the MMU, so do a TLB flush. */
/* op2 == 1: auxiliary control (XScale only here).  */
673 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
674 env->cp15.c1_xscaleauxcr = val;
/* op2 == 2: coprocessor access register.  */
679 if (arm_feature(env, ARM_FEATURE_XSCALE))
681 env->cp15.c1_coproc = val;
682 /* ??? Is this safe when called from within a TB? */
689 case 2: /* MMU Page table control / MPU cache control. */
690 if (arm_feature(env, ARM_FEATURE_MPU)) {
692 env->cp15.c2_data = val;
693 env->cp15.c2_data = val;
696 env->cp15.c2_insn = val;
/* Non-MPU: translation table base register.  */
702 env->cp15.c2_base = val;
705 case 3: /* MMU Domain access control / MPU write buffer control. */
707 tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
709 case 4: /* Reserved. */
711 case 5: /* MMU Fault status / MPU access permission. */
712 if (arm_feature(env, ARM_FEATURE_OMAPCP))
/* op2 == 0: data FSR, or MPU data access permissions (simple format,
 * expanded to the extended internal representation).  */
716 if (arm_feature(env, ARM_FEATURE_MPU))
717 val = extended_mpu_ap_bits(val);
718 env->cp15.c5_data = val;
/* op2 == 1: insn FSR / MPU instruction access permissions.  */
721 if (arm_feature(env, ARM_FEATURE_MPU))
722 val = extended_mpu_ap_bits(val);
723 env->cp15.c5_insn = val;
/* op2 == 2/3: extended-format access permissions (MPU only).  */
726 if (!arm_feature(env, ARM_FEATURE_MPU))
728 env->cp15.c5_data = val;
731 if (!arm_feature(env, ARM_FEATURE_MPU))
733 env->cp15.c5_insn = val;
739 case 6: /* MMU Fault address / MPU base/size. */
740 if (arm_feature(env, ARM_FEATURE_MPU)) {
/* CRm selects the MPU region number.  */
743 env->cp15.c6_region[crm] = val;
745 if (arm_feature(env, ARM_FEATURE_OMAPCP))
749 env->cp15.c6_data = val;
752 env->cp15.c6_insn = val;
759 case 7: /* Cache control. */
760 env->cp15.c15_i_max = 0x000;
761 env->cp15.c15_i_min = 0xff0;
762 /* No cache, so nothing to do. */
764 case 8: /* MMU TLB control. */
766 case 0: /* Invalidate all. */
769 case 1: /* Invalidate single TLB entry. */
771 /* ??? This is wrong for large pages and sections. */
772 /* As an ugly hack to make linux work we always flush a 4K
775 tlb_flush_page(env, val);
776 tlb_flush_page(env, val + 0x400);
777 tlb_flush_page(env, val + 0x800);
778 tlb_flush_page(env, val + 0xc00);
/* case 9: cache lockdown / TCM region registers.  */
788 if (arm_feature(env, ARM_FEATURE_OMAPCP))
791 case 0: /* Cache lockdown. */
794 env->cp15.c9_data = val;
797 env->cp15.c9_insn = val;
803 case 1: /* TCM memory region registers. */
804 /* Not implemented. */
810 case 10: /* MMU TLB lockdown. */
811 /* ??? TLB lockdown not implemented. */
813 case 12: /* Reserved. */
815 case 13: /* Process ID. */
818 /* Unlike real hardware the qemu TLB uses virtual addresses,
819 not modified virtual addresses, so this causes a TLB flush.
821 if (env->cp15.c13_fcse != val)
823 env->cp15.c13_fcse = val;
826 /* This changes the ASID, so do a TLB flush. */
827 if (env->cp15.c13_context != val
828 && !arm_feature(env, ARM_FEATURE_MPU))
830 env->cp15.c13_context = val;
836 case 14: /* Reserved. */
838 case 15: /* Implementation specific. */
839 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
/* XScale CPAR: gates access to coprocessors 0-13.  */
840 if (op2 == 0 && crm == 1) {
841 if (env->cp15.c15_cpar != (val & 0x3fff)) {
842 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
844 env->cp15.c15_cpar = val & 0x3fff;
850 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
854 case 1: /* Set TI925T configuration. */
855 env->cp15.c15_ticonfig = val & 0xe7;
/* OS_TYPE selects which CPUID the core reports.  */
856 env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
857 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
859 case 2: /* Set I_max. */
860 env->cp15.c15_i_max = val;
862 case 3: /* Set I_min. */
863 env->cp15.c15_i_min = val;
865 case 4: /* Set thread-ID. */
866 env->cp15.c15_threadid = val & 0xffff;
868 case 8: /* Wait-for-interrupt (deprecated). */
869 cpu_interrupt(env, CPU_INTERRUPT_HALT);
/* bad_reg: (label elided) — all unhandled writes end up here.  */
879 /* ??? For debugging only. Should raise illegal instruction exception. */
880 cpu_abort(env, "Unimplemented cp15 register write\n");
/* System-mode cp15 read.  Mirrors helper_set_cp15: dispatch on CRn
 * (insn bits [19:16]) with sub-dispatch on opcode2/CRm.  Switch/case
 * scaffolding and the shared `bad_reg:` label are elided in this view;
 * unhandled registers fall through to the cpu_abort at the end.  */
883 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
888 op2 = (insn >> 5) & 7;
890 switch ((insn >> 16) & 0xf) {
891 case 0: /* ID codes. */
893 default: /* Device ID. */
894 return env->cp15.c0_cpuid;
895 case 1: /* Cache Type. */
896 return env->cp15.c0_cachetype;
897 case 2: /* TCM status. */
898 if (arm_feature(env, ARM_FEATURE_XSCALE))
902 case 1: /* System configuration. */
903 if (arm_feature(env, ARM_FEATURE_OMAPCP))
906 case 0: /* Control register. */
907 return env->cp15.c1_sys;
908 case 1: /* Auxiliary control register. */
909 if (arm_feature(env, ARM_FEATURE_AUXCR))
911 if (arm_feature(env, ARM_FEATURE_XSCALE))
912 return env->cp15.c1_xscaleauxcr;
914 case 2: /* Coprocessor access register. */
915 if (arm_feature(env, ARM_FEATURE_XSCALE))
917 return env->cp15.c1_coproc;
921 case 2: /* MMU Page table control / MPU cache control. */
922 if (arm_feature(env, ARM_FEATURE_MPU)) {
925 return env->cp15.c2_data;
928 return env->cp15.c2_insn;
/* Non-MPU: translation table base register.  */
934 return env->cp15.c2_base;
936 case 3: /* MMU Domain access control / MPU write buffer control. */
938 case 4: /* Reserved. */
940 case 5: /* MMU Fault status / MPU access permission. */
941 if (arm_feature(env, ARM_FEATURE_OMAPCP))
/* op2 == 0: data FSR, or MPU data permissions in simple format.  */
945 if (arm_feature(env, ARM_FEATURE_MPU))
946 return simple_mpu_ap_bits(env->cp15.c5_data);
947 return env->cp15.c5_data;
/* op2 == 1: insn FSR / MPU instruction permissions.
 * NOTE(review): this MPU branch reads c5_data, but the matching write
 * path in helper_set_cp15 stores to c5_insn — looks like a copy/paste
 * of the op2 == 0 branch; verify against the full source/history.  */
949 if (arm_feature(env, ARM_FEATURE_MPU))
950 return simple_mpu_ap_bits(env->cp15.c5_data);
951 return env->cp15.c5_insn;
/* op2 == 2/3: extended-format permissions (MPU only).  */
953 if (!arm_feature(env, ARM_FEATURE_MPU))
955 return env->cp15.c5_data;
957 if (!arm_feature(env, ARM_FEATURE_MPU))
959 return env->cp15.c5_insn;
963 case 6: /* MMU Fault address / MPU base/size. */
964 if (arm_feature(env, ARM_FEATURE_MPU)) {
969 return env->cp15.c6_region[n];
971 if (arm_feature(env, ARM_FEATURE_OMAPCP))
975 return env->cp15.c6_data;
977 /* Arm9 doesn't have an IFAR, but implementing it anyway
978 shouldn't do any harm. */
979 return env->cp15.c6_insn;
984 case 7: /* Cache control. */
985 /* ??? This is for test, clean and invalidate operations that set the
986 Z flag. We can't represent N = Z = 1, so it also clears
987 the N flag. Oh well. */
990 case 8: /* MMU TLB control. */
992 case 9: /* Cache lockdown. */
993 if (arm_feature(env, ARM_FEATURE_OMAPCP))
997 return env->cp15.c9_data;
999 return env->cp15.c9_insn;
1003 case 10: /* MMU TLB lockdown. */
1004 /* ??? TLB lockdown not implemented. */
1006 case 11: /* TCM DMA control. */
1007 case 12: /* Reserved. */
1009 case 13: /* Process ID. */
1012 return env->cp15.c13_fcse;
1014 return env->cp15.c13_context;
1018 case 14: /* Reserved. */
1020 case 15: /* Implementation specific. */
1021 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
/* XScale CPAR read.  */
1022 if (op2 == 0 && crm == 1)
1023 return env->cp15.c15_cpar;
1027 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1031 case 1: /* Read TI925T configuration. */
1032 return env->cp15.c15_ticonfig;
1033 case 2: /* Read I_max. */
1034 return env->cp15.c15_i_max;
1035 case 3: /* Read I_min. */
1036 return env->cp15.c15_i_min;
1037 case 4: /* Read thread-ID. */
1038 return env->cp15.c15_threadid;
1039 case 8: /* TI925T_status */
/* bad_reg: (label elided) — all unhandled reads end up here.  */
1047 /* ??? For debugging only. Should raise illegal instruction exception. */
1048 cpu_abort(env, "Unimplemented cp15 register read\n");
1052 void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
1053 ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
1056 if (cpnum < 0 || cpnum > 14) {
1057 cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
1061 env->cp[cpnum].cp_read = cp_read;
1062 env->cp[cpnum].cp_write = cp_write;
1063 env->cp[cpnum].opaque = opaque;