/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
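/* Once a page has taken SMC_BITMAP_USE_THRESHOLD write faults while still
   holding translated code, build_page_bitmap() switches it to a per-page
   bitmap of code bytes, so that tb_invalidate_phys_page_fast() can skip
   invalidation for writes that do not actually touch translated code. */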
#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
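/* Illustrative helper, not part of the original source: how a target page
   index is split for the two-level lookup used by page_find() below.
   Assuming TARGET_PAGE_BITS == 12, L1_BITS == L2_BITS == 10, so both
   levels hold 1024 entries and together cover a 32-bit address space. */
static inline void page_index_split(unsigned int index,
                                    unsigned int *l1_idx, unsigned int *l2_idx)
{
    *l1_idx = index >> L2_BITS;        /* selects the l1_map slot */
    *l2_idx = index & (L2_SIZE - 1);   /* selects the entry inside it */
}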
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
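    /* Worked example (added, not in the original source): on a host with
       4 KiB pages and TARGET_PAGE_SIZE == 4096, the loop above yields
       qemu_host_page_bits == 12 and, on a 32-bit host,
       qemu_host_page_mask == 0xfffff000, so "addr & qemu_host_page_mask"
       rounds an address down to the start of its host page. */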
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
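/* Added explanatory note: the jmp_first/jmp_next lists used below store a
   tag in the two low bits of each TranslationBlock pointer, relying on TBs
   being at least 4-byte aligned.  Tag 0 or 1 names which of the two jump
   slots of the pointed-to TB continues the chain; tag 2 marks the list
   head, e.g.

       n1  = (long)tb1 & 3;                          extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   recover the pointer

   which is why an empty list is encoded as ((long)tb | 2). */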
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
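/* Worked example (added, not in the original source): set_bits() marks the
   bit range [start, start + len) in a bitmap that holds one bit per page
   byte.  For instance set_bits(bitmap, 16, 4) computes end = 20, advances
   tab by 16 >> 3 = 2 bytes, and since start and end fall in the same byte
   sets bitmap[2] |= 0x0f, i.e. bits 16..19. */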
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
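    /* Added note: the search relies on tbs[] being sorted by tc_ptr, which
       holds because tb_alloc() hands out blocks in order and code_gen_ptr
       only grows between flushes; when there is no exact match, tbs[m_max]
       is the last block whose tc_ptr is below the target. */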
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
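/* Added explanatory note: each target page owns one byte in phys_ram_dirty,
   with 0xff meaning "dirty for every client".  tlb_protect_code() clears
   CODE_DIRTY_FLAG so that subsequent writes to the page trap into the
   notdirty handlers further down; tlb_unprotect_code_phys() sets it again
   once no translated code remains, re-enabling fast direct writes. */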
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
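/* Illustrative usage, not from the original source: a machine model would
   typically map its RAM and then an MMIO region on top of the physical
   address space (the identifiers below are hypothetical):

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       mmio = cpu_register_io_memory(0, mmio_read, mmio_write, dev_state);
       cpu_register_physical_memory(0x10000000, 0x1000, mmio);

   For RAM, phys_offset is the page-aligned offset into phys_ram_base; for
   MMIO, the io_index returned by cpu_register_io_memory() lives in the low
   bits, which is the (phys_offset & ~TARGET_PAGE_MASK) != 0 case above. */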
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
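/* Illustrative usage, not from the original source: a device supplies one
   handler per access size (byte, word, dword), mirroring the
   unassigned_mem_read/write tables above (the names are hypothetical):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);

   Passing io_index == 0 allocates a fresh slot; the shifted return value
   can be handed directly to cpu_register_physical_memory(). */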
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
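/* Illustrative usage, not from the original source: device DMA models call
   this helper rather than touching guest RAM directly, e.g.

       uint8_t sector[512];
       cpu_physical_memory_read(disk_dma_addr, sector, sizeof(sector));
       ...
       cpu_physical_memory_write(disk_dma_addr, sector, sizeof(sector));

   so that MMIO pages are routed through io_mem_read/io_mem_write and RAM
   writes invalidate any translated code in the touched range. */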
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif