/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
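
/* Illustrative sketch (not part of the original file): with the usual
   L2_BITS == 10 and a 4 KiB target page, a 32-bit address splits into a
   10-bit L1 index, a 10-bit L2 index and a 12-bit page offset.  The
   helper below only demonstrates the decomposition the lookup functions
   that follow rely on; it assumes those constants and is kept out of
   the build. */
#if 0
static void example_split_addr(target_ulong addr)
{
    target_ulong page_index = addr >> TARGET_PAGE_BITS;
    target_ulong l1 = page_index >> L2_BITS;        /* index into l1_map */
    target_ulong l2 = page_index & (L2_SIZE - 1);   /* index into the L2 table */
    target_ulong off = addr & ~TARGET_PAGE_MASK;    /* offset within the page */
    printf("l1=%lx l2=%lx off=%lx\n", (long)l1, (long)l2, (long)off);
}
#endif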
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
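
/* Example only (not from the original source): the typical lookup
   pattern used throughout this file.  A writer that must create the
   entry calls page_find_alloc(); a reader that may legitimately see an
   untouched page calls page_find() and checks for NULL. */
#if 0
static int example_page_has_tb(target_ulong vaddr)
{
    PageDesc *p = page_find(vaddr >> TARGET_PAGE_BITS);
    if (!p)
        return 0;                /* page never touched: no descriptor */
    return p->first_tb != NULL;  /* at least one TB intersects the page */
}
#endif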
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2)
                ptb = &tb1->jmp_first;
            else
                ptb = &tb1->jmp_next[n1];
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
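
/* Note (illustration only, not from the original source): the
   jmp_first/jmp_next lists store a TranslationBlock pointer with the
   jump slot number packed into the two low bits, relying on the
   alignment of the structures; the value 2 marks the list head.
   Packing and unpacking look like this: */
#if 0
static inline TranslationBlock *example_pack_tb(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);      /* n is 0, 1 or 2 */
}

static inline TranslationBlock *example_unpack_tb(TranslationBlock *ptr, int *n)
{
    *n = (long)ptr & 3;                             /* recover the slot */
    return (TranslationBlock *)((long)ptr & ~3);    /* recover the pointer */
}
#endif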
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
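
/* Worked example (illustration only, assumes <assert.h>): set_bits(tab,
   start, len) marks bits start..start+len-1 in an LSB-first bitmap, so
   set_bits(tab, 10, 7) marks bits 10..16: */
#if 0
static void example_set_bits(void)
{
    uint8_t tab[4] = { 0, 0, 0, 0 };
    set_bits(tab, 10, 7);
    assert(tab[1] == 0xfc);   /* bits 10..15 live in bits 2..7 of byte 1 */
    assert(tab[2] == 0x01);   /* bit 16 is bit 0 of byte 2 */
}
#endif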
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}
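
/* Usage sketch (example only, not from the original source): a debug
   stub would drive the watchpoint API like this; the address is
   arbitrary. */
#if 0
static void example_watchpoint_usage(CPUState *env)
{
    cpu_watchpoint_insert(env, 0x1000);   /* watch the page holding 0x1000 */
    cpu_watchpoint_remove(env, 0x1000);   /* and drop it again */
}
#endif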
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
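
/* Usage sketch (example only, not from the original source): parsing
   "-d" style flags from the command line; cpu_str_to_log_mask() returns
   0 on error. */
#if 0
static void example_parse_log_flags(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. arg = "in_asm,cpu" */
    if (!mask)
        fprintf(stderr, "bad log flags '%s'\n", arg);
    else
        cpu_set_log(mask);
}
#endif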
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
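
/* Illustration (not from the original file): every RAM page has one
   byte of dirty flags in phys_ram_dirty; CODE_DIRTY_FLAG stays clear
   while the page still holds translated code, so writes to it must be
   trapped.  A test for that state would look like: */
#if 0
static inline int example_code_writes_trapped(ram_addr_t ram_addr)
{
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}
#endif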
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (te->addr_code != -1) {
            tlb_flush_jmp_cache(env, te->addr_code);
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for(j = 0;j < L2_SIZE; j++) {
            prot1 = p ? p[j].flags : 0;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                start = prot1 ? end : -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
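
/* Example only (not from the original source): a user-mode loader
   marking a freshly mapped guest region readable and writable might do
   the following; the range is hypothetical. */
#if 0
static void example_map_guest_range(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    if (!page_check_range(start, len, PAGE_READ | PAGE_WRITE))
        abort();   /* cannot happen for a range we just set */
}
#endif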
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
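
/* Example only (not from the original source): a board model typically
   carves its RAM out of the preallocated pool and maps it at guest
   physical address 0: */
#if 0
static void example_init_board_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset);
}
#endif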
void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
2364 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2370 idx = SUBPAGE_IDX(addr - mmio->base);
2371 #if defined(DEBUG_SUBPAGE)
2372 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2373 mmio, len, addr, idx);
2375 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2380 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2381 uint32_t value, unsigned int len)
2385 idx = SUBPAGE_IDX(addr - mmio->base);
2386 #if defined(DEBUG_SUBPAGE)
2387 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2388 mmio, len, addr, idx, value);
2390 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
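
/* Illustration (not part of the original file): subpages exist so that
   regions smaller than TARGET_PAGE_SIZE can be mapped.  When a board
   model registers a device that covers only part of a page, the
   registration path allocates a subpage_t for the enclosing page and
   redirects just that sub-range; conceptually (my_read, my_write and
   my_state are hypothetical device callbacks):

       int io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(0x10000100, 0x100, io);
       // internally this ends up as something like:
       //   subpage_init(0x10000000, &phys_offset, IO_MEM_UNASSIGNED);
       //   subpage_register(mmio, 0x100, 0x1ff, io);
*/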
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
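
/* Usage sketch (illustration, not part of the original file): a device
   model registers byte/word/dword callbacks and then maps the returned
   token into the guest physical address space.  The my_ names and the
   base/size values below are hypothetical:

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       int io = cpu_register_io_memory(0, my_read, my_write, my_state);
       if (io != -1)
           cpu_register_physical_memory(0x10000000, 0x1000, io);
*/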
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
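
/* Usage sketch (illustration, not part of the original file): device
   emulation uses cpu_physical_memory_rw() as its DMA primitive, usually
   via the cpu_physical_memory_read() and cpu_physical_memory_write()
   wrappers.  E.g. a block device copying a sector to a guest buffer at
   guest physical address dma_addr (hypothetical variable):

       uint8_t sector[512];
       // fill sector[] from the disk image, then:
       cpu_physical_memory_write(dma_addr, sector, sizeof(sector));

   Writes that land in RAM also invalidate any TBs translated from the
   touched range, as done above. */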
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
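
/* Usage sketch (illustration, not part of the original file): firmware
   loaders use this to copy an image into memory that may be mapped as
   ROM for the guest (bios_base, blob and bios_size are hypothetical):

       cpu_physical_memory_write_rom(bios_base, blob, bios_size);

   A plain cpu_physical_memory_write() would route the ROM case through
   the unassigned write handler registered in io_mem_init() above, and
   the data would be discarded. */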
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: split the 64 bit access into two 32 bit reads in
           target byte order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
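
/* Illustration (not part of the original file): target MMU helpers use
   the _notdirty stores when setting accessed/dirty flags inside a guest
   page table entry, so that the page's own dirty bits keep tracking
   guest PTE modifications.  A sketch, with PG_ACCESSED_MASK standing in
   for a target-specific flag:

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/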
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
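
/* Note (illustration, not part of the original file): the ld and st
   _phys helpers operate in target byte order; tswap16() and tswap64()
   convert between host and target endianness so that a round trip is
   value-preserving regardless of the host:

       stl_phys(addr, 0x11223344);
       assert(ldl_phys(addr) == 0x11223344);
*/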
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
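
/* Usage sketch (illustration, not part of the original file): the gdb
   stub reads guest memory by virtual address; the page table walk is
   done by the target-specific cpu_get_phys_page_debug() used above:

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
           // page not mapped: report a memory error to the debugger
       }
*/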
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
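
/* Usage sketch (illustration, not part of the original file): the
   monitor's "info jit" command calls this with an fprintf-style
   callback so the statistics reach the monitor channel; plain stdio
   works too, since fprintf matches the expected signature:

       dump_exec_info(stderr, fprintf);
*/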
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif