/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
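/* (Added note, a worked example rather than original text; it assumes
   the common TARGET_PAGE_BITS of 12, i.e. 4 KiB target pages.) A 32-bit
   target address then splits into 10 bits of L1 index, 10 bits of L2
   index and a 12-bit page offset:

       index  = addr >> TARGET_PAGE_BITS;    // 20-bit page number
       l1_idx = index >> L2_BITS;            // selects a PageDesc array
       l2_idx = index & (L2_SIZE - 1);       // selects the PageDesc
*/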
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
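/* (Added note, a worked example rather than original text): on a host
   with 4 KiB pages, getpagesize() returns 4096, so host_page_bits ends
   up as 12 and host_page_mask as 0xfffff000; an address is rounded down
   to its host page with 'addr & host_page_mask'. */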
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else

static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash; if it is
           absent, it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
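/* (Added note, not original text): the page_next[] and jmp lists tag the
   low two bits of each TranslationBlock pointer with the slot number, so
   a link is decoded as:

       n1  = (long)tb1 & 3;                         // owning slot (0 or 1)
       tb1 = (TranslationBlock *)((long)tb1 & ~3);  // the real pointer

   A tag value of 2 marks the head of the circular jmp list, which is why
   'jmp_first = tb | 2' above acts as a fail-safe terminator. */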
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
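/* (Added note, a worked example rather than original text): bit i of
   'tab' is tab[i >> 3] >> (i & 7), so set_bits(tab, 10, 4) marks bits
   10..13, i.e. it ORs 0x3c into tab[1]; full middle bytes are written
   as 0xff by the loop above. */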
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len);
    }
}
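/* (Added note, not original text): because the callers pass len of 1, 2
   or 4 with 'start' aligned to 'len', the 'len' bits tested above never
   straddle a byte of the bitmap, so a single byte read is sufficient. */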
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start, end). This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;

        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif
/* add the TB to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
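/* (Added note, not original text): tb_next_offset[n] == 0xffff marks an
   unused jump slot. For real slots, tb_reset_jump() points the jump at
   the code that follows it inside the TB, so an unchained TB simply
   falls through and exits to the main execution loop. */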
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the TB that we could have jumped to */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

void tlb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address |= IO_MEM_CODE;
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}
/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self-modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
/* XXX: find a way to improve it */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
/* add a new TLB entry. At most one entry for a given virtual
   address is permitted. */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    TranslationBlock *first_tb;
    unsigned long pd;
    target_ulong address, addend;
    unsigned int index;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
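    /* (Added note, a hedged sketch of how these fields are consumed): on
       a softmmu access the generated code compares the page-aligned guest
       address against 'address'; on a hit the host pointer is simply
       'vaddr + addend'. The IO_MEM_* bits stored in 'address' make the
       fast compare fail, routing the access to the handlers above. */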
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we act as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

#endif /* defined(CONFIG_USER_ONLY) */
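/* (Added usage note, hedged; the call site below is illustrative, not
   part of this file): in user-mode emulation the syscall layer is
   expected to unprotect a guest buffer before the host kernel writes
   into it, e.g.:

       page_unprotect_range(host_buf, count);
       ret = read(fd, host_buf, count);

   where 'host_buf' is the host view of the guest buffer. */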
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self-modifying code support in soft MMU mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}

static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}

static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    io_mem_nb = 5;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
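/* (Added usage sketch, hedged; 'my_read'/'my_write' are hypothetical
   handler tables, not part of this file): a device registers a new io
   zone by passing io_index == 0 and maps it with the returned value:

       int io;
       io = cpu_register_io_memory(0, my_read, my_write);
       if (io != -1)
           cpu_register_physical_memory(0xe0000000, 0x1000, io);
*/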
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif