/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#endif

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
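
/* Illustrative sketch (kept out of the build): how a target page index is
   split across the two-level table above. 'page_lookup_sketch' is a
   hypothetical name, not part of QEMU. */
#if 0
static PageDesc *page_lookup_sketch(target_ulong addr)
{
    target_ulong page_index = addr >> TARGET_PAGE_BITS;
    unsigned int l1_idx = (page_index >> L2_BITS) & (L1_SIZE - 1);
    unsigned int l2_idx = page_index & (L2_SIZE - 1);
    PageDesc *l2 = l1_map[l1_idx];   /* L2 tables are allocated lazily */
    return l2 ? &l2[l2_idx] : NULL;
}
#endif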
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif
/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
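
    /* Worked example (illustrative): with a 4096-byte host page,
       qemu_host_page_bits ends up as 12 and qemu_host_page_mask as
       ~0xfff, so 'addr & qemu_host_page_mask' rounds an address down
       to the start of its host page. */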
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
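
/* Usage sketch (illustrative, kept out of the build; 'phys_pc' and
   'other_pc' are hypothetical): page_find() is the read-only probe, while
   page_find_alloc() creates the L2 table on demand, e.g. when a freshly
   generated TB is recorded for a page: */
#if 0
    PageDesc *p = page_find_alloc(phys_pc >> TARGET_PAGE_BITS);
    /* ...link the new TB on p->first_tb... */
    if (!page_find(other_pc >> TARGET_PAGE_BITS)) {
        /* never translated: invalidation paths can simply return */
    }
#endif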
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
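
/* Note on the sizing above: code_gen_buffer_max_size keeps one maximal
   translation block of slack, so tb_alloc() below can compare
   (code_gen_ptr - code_gen_buffer) against code_gen_buffer_max_size
   before emitting and still be sure the block in progress fits in the
   buffer proper. */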
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);

    return 0;
}
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
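
/* Illustrative sketch (kept out of the build): the jump lists above tag
   the low two bits of each TranslationBlock pointer with the jump slot,
   and the value 2 marks the end of the circular list. Decoding an entry: */
#if 0
    tb1 = tb->jmp_first;
    n1 = (long)tb1 & 3;                            /* 0/1: jump slot, 2: list end */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);    /* strip the tag bits */
#endif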
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
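
/* Worked example for set_bits(): start=5, len=7 covers bits 5..11, so the
   first byte is or-ed with mask 0xe0 (bits 5-7) and the second byte with
   mask 0x0f (bits 8-11); any byte fully inside the range would be set to
   0xff by the middle loop. */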
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           the page containing the TB. */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
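
/* Worked example for the bitmap test above: for a len=4 write at page
   offset 0x123, offset >> 3 selects byte 0x24 of the code bitmap and
   offset & 7 == 3 shifts its bits down, so 'b & ((1 << 4) - 1)' is
   non-zero exactly when one of the four written bytes overlaps
   translated code. */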
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           the page containing the TB. */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
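
/* Illustrative sketch (kept out of the build): phys_ram_dirty keeps one
   flag byte per RAM page, so a page's code-dirty status is tested as: */
#if 0
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG) {
        /* no translated code depends on this page any more */
    }
#endif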
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
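
/* Illustrative sketch (kept out of the build, simplified from the softmmu
   fast path): a TLB hit turns a guest virtual address into a host pointer
   by adding the per-entry addend computed above, with no further page
   table walk: */
#if 0
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    if ((vaddr & TARGET_PAGE_MASK) == (te->addr_read & TARGET_PAGE_MASK)) {
        uint8_t *host_ptr = (uint8_t *)(unsigned long)(vaddr + te->addend);
    }
#endif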
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
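
/* Flow sketch for self-modifying code in user mode: a write to a page that
   tb_alloc_page() made read-only raises SIGSEGV; the signal handler calls
   page_unprotect(), which restores PAGE_WRITE via mprotect() and drops the
   page's TBs through tb_invalidate_phys_page(), after which the faulting
   write is restarted against plain RAM. */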
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
#ifdef USE_KVM
    kvm_set_phys_mem(start_addr, size, phys_offset);
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
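
/* Usage sketch (illustrative, kept out of the build; the address and size
   are hypothetical): machine init code carves RAM out of the preallocated
   arena and then maps it into guest physical address space: */
#if 0
    ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, ram_offset);
#endif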
void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
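/* Clarifying sketch (illustrative addition): each RAM page owns one flag
   byte in phys_ram_dirty.  The notdirty handlers above set every flag
   except CODE_DIRTY_FLAG; only once translated code on the page has been
   flushed does the byte reach 0xff and the slow-path callback get
   removed from the TLB. */
static inline int example_page_fully_dirty(ram_addr_t ram_addr)
{
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] == 0xff;
}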
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2467 /* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
            && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr - mmio->base);
    uint32_t ret;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
    return ret;
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}
static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }
    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }
    return mmio;
}
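/* Usage sketch (illustrative addition): carving a sub-page region out of
   one page by hand.  cpu_register_physical_memory() normally does this
   automatically when a registered range does not cover a whole page; the
   offsets and the "example_" name are hypothetical. */
static void example_register_small_mmio(target_phys_addr_t page_base,
                                        int dev_io_index)
{
    ram_addr_t phys;
    subpage_t *sp = subpage_init(page_base, &phys, IO_MEM_UNASSIGNED);
    /* route only bytes 0x100..0x1ff of the page to the device */
    subpage_register(sp, 0x100, 0x1ff, dev_io_index);
    /* a real caller would store "phys" into the page's phys_offset */
}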
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access bytes (index 0), words (index 1) and longwords
   (index 2).  Functions can be omitted with a NULL function pointer.
   The registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory().  (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
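/* Usage sketch (illustrative addition): registering a device that only
   implements 32-bit accesses.  All "mydev_"/"example_" names are
   hypothetical.  Because the byte and word slots are NULL, the returned
   value carries IO_MEM_SUBWIDTH and narrow accesses are routed through
   the subpage machinery above. */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device registers would be decoded here */
}
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device registers would be written here */
}
static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_register_mydev(target_phys_addr_t base, void *state)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}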
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
2744 #endif /* !defined(CONFIG_USER_ONLY) */
2746 /* physical memory access (slow version, mainly for debug) */
2747 #if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                      (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
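/* Usage sketch (illustrative addition): the cpu_physical_memory_read()
   and cpu_physical_memory_write() wrappers used below are thin aliases
   for this function with is_write fixed to 0 or 1.  The "example_"
   helper is hypothetical. */
static void example_copy_guest_phys(target_phys_addr_t dst,
                                    target_phys_addr_t src,
                                    uint8_t *tmp, int len)
{
    cpu_physical_memory_read(src, tmp, len);
    cpu_physical_memory_write(dst, tmp, len);
}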
/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
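/* Usage sketch (illustrative addition): ROM loaders use this entry point
   so the image lands even in regions registered as IO_MEM_ROM, which a
   plain cpu_physical_memory_write() would silently skip.  The
   "example_" name is hypothetical. */
static void example_load_rom_image(const uint8_t *image, int size,
                                   target_phys_addr_t base)
{
    cpu_physical_memory_write_rom(base, image, size);
}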
2920 /* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
              (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
2950 /* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: done as two 32-bit reads, high word first on
           big-endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
              (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
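/* Clarifying sketch (illustrative addition): tswap16() converts between
   guest and host byte order, so a value stored with stw_phys() reads
   back unchanged through lduw_phys() regardless of target endianness.
   The "example_" helper is hypothetical. */
static int example_u16_roundtrip(target_phys_addr_t addr)
{
    stw_phys(addr, 0x1234);            /* stored in guest byte order */
    return lduw_phys(addr) == 0x1234;  /* swapped back to host order */
}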
/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  This is useful when the dirty
   bits are being used to track modified PTEs. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
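/* Usage sketch (illustrative addition): target MMU helpers use the
   _notdirty store when setting accessed/dirty bits in a guest PTE during
   a TLB fill, so the bookkeeping write itself does not mark the page
   dirty or invalidate translated code.  The PTE bit value and the
   "example_" name are hypothetical. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}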
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
              (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3069 /* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
3126 /* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
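/* Usage sketch (illustrative addition): this is the path the gdb stub
   takes to inspect guest virtual memory; it returns -1 if any page in
   the range is unmapped.  The "example_" name is hypothetical. */
static int example_peek_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}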
/* in deterministic execution mode, instructions doing device I/O
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif