2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
/* Self-modifying-code heuristic: after this many write lookups on a code
   page, a per-page code bitmap is built (see build_page_bitmap below). */
64 #define SMC_BITMAP_USE_THRESHOLD 10
/* Per-target width of the physical (and, for alpha, virtual) address space
   covered by the phys page tables.  NOTE(review): the extract is missing
   lines here (original line numbers are non-contiguous), so some #elif/#endif
   partners are not visible; code left byte-identical. */
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
/* Global state of the translation cache: the TB array, the physical-PC hash
   table, and the generated-code buffer. */
84 static TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 #if defined(__arm__) || defined(__sparc_v9__)
92 /* The prologue must be reachable with a direct jump. ARM and Sparc64
93 have limited branch ranges (possibly also PPC) so place it in a
94 section close to code segment. */
95 #define code_gen_section \
96 __attribute__((__section__(".gen_code"))) \
97 __attribute__((aligned (32)))
/* NOTE(review): the #else branch marker for the alternate definition below
   is not visible in this extract. */
99 #define code_gen_section \
100 __attribute__((aligned (32)))
/* Trampoline into generated code; made executable in code_gen_alloc(). */
103 uint8_t code_gen_prologue[1024] code_gen_section;
104 static uint8_t *code_gen_buffer;
105 static unsigned long code_gen_buffer_size;
106 /* threshold to flush the translated code buffer */
107 static unsigned long code_gen_buffer_max_size;
108 uint8_t *code_gen_ptr;
/* System-emulation RAM state (not used in user-only mode). */
110 #if !defined(CONFIG_USER_ONLY)
111 ram_addr_t phys_ram_size;
113 uint8_t *phys_ram_base;
114 uint8_t *phys_ram_dirty;
115 static int in_migration;
116 static ram_addr_t phys_ram_alloc_offset = 0;
120 /* current CPU in the current thread. It is only valid inside
121    cpu_exec() -- NOTE(review): tail of this comment is elided in the
122    extract; wording assumed, confirm against upstream. */
122 CPUState *cpu_single_env;
123 /* 0 = Do not count executed instructions.
124 1 = Precise instruction counting.
125 2 = Adaptive rate instruction counting. */
127 /* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
/* Per-target-page descriptor: TBs intersecting the page plus the
   self-modifying-code bookkeeping. */
131 typedef struct PageDesc {
132 /* list of TBs intersecting this ram page */
133 TranslationBlock *first_tb;
134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138 #if defined(CONFIG_USER_ONLY)
/* Per-physical-page descriptor used by the softmmu memory map. */
143 typedef struct PhysPageDesc {
144 /* offset in host memory of the page + io_index in the low bits */
145 ram_addr_t phys_offset;
146 ram_addr_t region_offset;
/* Two-level page table geometry (L1_BITS x L2_BITS index bits above
   TARGET_PAGE_BITS). */
150 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
151 /* XXX: this is a temporary hack for alpha target.
152 * In the future, this is to be replaced by a multi-level table
153 * to actually be able to handle the complete 64 bits address space.
154 */
155 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
157 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_SIZE (1 << L1_BITS)
161 #define L2_SIZE (1 << L2_BITS)
/* Host page geometry, filled in by page_init(). */
163 unsigned long qemu_real_host_page_size;
164 unsigned long qemu_host_page_bits;
165 unsigned long qemu_host_page_size;
166 unsigned long qemu_host_page_mask;
168 /* XXX: for system emulation, it could just be an array */
169 static PageDesc *l1_map[L1_SIZE];
170 static PhysPageDesc **l1_phys_map;
/* I/O memory dispatch tables: per io_index, one read and one write handler
   for each of the 4 access sizes, plus an opaque pointer. */
172 #if !defined(CONFIG_USER_ONLY)
173 static void io_mem_init(void);
175 /* io memory support */
176 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
177 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
178 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
179 static char io_mem_used[IO_MEM_NB_ENTRIES];
180 static int io_mem_watch;
/* qemu -d logging state. */
184 static const char *logfilename = "/tmp/qemu.log";
187 static int log_append = 0;
/* Statistics counters (flush/invalidate accounting). */
190 static int tlb_flush_count;
191 static int tb_flush_count;
192 static int tb_phys_invalidate_count;
/* Sub-page I/O region: per-byte handler tables within one target page. */
194 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
195 typedef struct subpage_t {
196 target_phys_addr_t base;
197 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
198 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
199 void *opaque[TARGET_PAGE_SIZE][2][4];
200 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
/* Make the host range [addr, addr+size) executable.  Win32 variant uses
   VirtualProtect.  NOTE(review): the surrounding #ifdef/_WIN32 guard, braces
   and the old_protect declaration are elided in this extract. */
204 static void map_exec(void *addr, long size)
207 VirtualProtect(addr, size,
208 PAGE_EXECUTE_READWRITE, &old_protect);
/* POSIX variant: round the range out to host page boundaries, then
   mprotect() it RWX. */
212 static void map_exec(void *addr, long size)
214 unsigned long start, end, page_size;
216 page_size = getpagesize();
217 start = (unsigned long)addr;
218 start &= ~(page_size - 1);
/* round the end up to the next page boundary */
220 end = (unsigned long)addr + size;
221 end += page_size - 1;
222 end &= ~(page_size - 1);
224 mprotect((void *)start, end - start,
225 PROT_READ | PROT_WRITE | PROT_EXEC);
/* One-time init of host/target page geometry and the physical page table.
   In user-only mode on non-Windows hosts, additionally seeds page flags from
   /proc/self/maps so already-mapped host regions are known. */
229 static void page_init(void)
231 /* NOTE: we can always suppose that qemu_host_page_size >=
232    TARGET_PAGE_SIZE -- NOTE(review): comment tail elided in extract. */
235 SYSTEM_INFO system_info;
237 GetSystemInfo(&system_info);
238 qemu_real_host_page_size = system_info.dwPageSize;
/* non-Windows: ask libc for the real host page size */
241 qemu_real_host_page_size = getpagesize();
243 if (qemu_host_page_size == 0)
244 qemu_host_page_size = qemu_real_host_page_size;
/* the emulated page granularity can never be finer than the target's */
245 if (qemu_host_page_size < TARGET_PAGE_SIZE)
246 qemu_host_page_size = TARGET_PAGE_SIZE;
247 qemu_host_page_bits = 0;
248 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
249 qemu_host_page_bits++;
250 qemu_host_page_mask = ~(qemu_host_page_size - 1);
251 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
252 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 long long startaddr, endaddr;
261 last_brk = (unsigned long)sbrk(0);
262 f = fopen("/proc/self/maps", "r");
/* parse each "start-end ..." mapping line; clamp into the target's
   physical address space before recording the flags */
265 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267 startaddr = MIN(startaddr,
268 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
269 endaddr = MIN(endaddr,
270 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
271 page_set_flags(startaddr & TARGET_PAGE_MASK,
272 TARGET_PAGE_ALIGN(endaddr),
283 static inline PageDesc **page_l1_map(target_ulong index)
285 #if TARGET_LONG_BITS > 32
286 /* Host memory outside guest VM. For 32-bit targets we have already
287 excluded high addresses. */
288 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
291 return &l1_map[index >> L2_BITS];
294 static inline PageDesc *page_find_alloc(target_ulong index)
297 lp = page_l1_map(index);
303 /* allocate if not found */
304 #if defined(CONFIG_USER_ONLY)
305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311 unsigned long addr = h2g(p);
312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 return p + (index & (L2_SIZE - 1));
324 static inline PageDesc *page_find(target_ulong index)
327 lp = page_l1_map(index);
334 return p + (index & (L2_SIZE - 1));
/* Walk (and optionally grow) the 2- or 3-level physical page table and
   return the PhysPageDesc for physical page 'index'.  When 'alloc' is 0 the
   walk fails (elided early-returns) instead of allocating. */
337 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
342 p = (void **)l1_phys_map;
343 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
345 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
/* extra top-level step for >32-bit physical address spaces */
348 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351 /* allocate if not found */
354 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355 memset(p, 0, sizeof(void *) * L1_SIZE);
359 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 /* allocate if not found */
366 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
/* fresh leaf pages start out unassigned, with identity region offsets */
368 for (i = 0; i < L2_SIZE; i++) {
369 pd[i].phys_offset = IO_MEM_UNASSIGNED;
370 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
/* Lookup-only convenience wrapper. */
376 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378 return phys_page_find_alloc(index, 0);
/* Forward declarations for the softmmu TLB code-protection hooks; in system
   emulation the mmap lock degenerates to a no-op. */
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr);
383 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommended to allocate big chunks of data in
393 user mode. It will change when a dedicated libc will be used */
394 #define USE_STATIC_CODE_GEN_BUFFER
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
/* Allocate (or adopt) the translated-code buffer, make it executable, and
   size the TB array.  'tb_size' of 0 selects a default.  Per-host branches
   constrain the buffer's placement so generated direct branches can reach
   the prologue.  NOTE(review): several lines (braces, #else/#endif, mmap
   flag setup) are elided in this extract; code left byte-identical. */
401 static void code_gen_alloc(unsigned long tb_size)
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414 /* XXX: needs ajustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428 #if defined(__x86_64__)
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439 #elif defined(__arm__)
440 /* Map the buffer below 32M, so we can use direct calls and branches */
442 start = (void *) 0x01000000UL;
443 if (code_gen_buffer_size > 16 * 1024 * 1024)
444 code_gen_buffer_size = 16 * 1024 * 1024;
446 code_gen_buffer = mmap(start, code_gen_buffer_size,
447 PROT_WRITE | PROT_READ | PROT_EXEC,
449 if (code_gen_buffer == MAP_FAILED) {
450 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 #elif defined(__FreeBSD__) || defined(__DragonFly__)
458 flags = MAP_PRIVATE | MAP_ANONYMOUS;
459 #if defined(__x86_64__)
460 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
461 * 0x40000000 is free */
463 addr = (void *)0x40000000;
464 /* Cannot map more than that */
465 if (code_gen_buffer_size > (800 * 1024 * 1024))
466 code_gen_buffer_size = (800 * 1024 * 1024);
468 code_gen_buffer = mmap(addr, code_gen_buffer_size,
469 PROT_WRITE | PROT_READ | PROT_EXEC,
471 if (code_gen_buffer == MAP_FAILED) {
472 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
/* generic fallback: plain malloc + mprotect */
477 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
478 map_exec(code_gen_buffer, code_gen_buffer_size);
480 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
481 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
/* leave headroom for one maximal block so a TB never overruns the buffer */
482 code_gen_buffer_max_size = code_gen_buffer_size -
483 code_gen_max_block_size();
484 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
485 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
488 /* Must be called before using the QEMU cpus. 'tb_size' is the size
489 (in bytes) allocated to the translation buffer. Zero means default
490 size. */
491 void cpu_exec_init_all(unsigned long tb_size)
494 code_gen_alloc(tb_size);
495 code_gen_ptr = code_gen_buffer;
/* system-emulation-only extra init follows (elided in extract) */
497 #if !defined(CONFIG_USER_ONLY)
/* savevm/loadvm handlers for the target-independent part of CPUState:
   only 'halted' and 'interrupt_request' are serialized. */
502 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
504 #define CPU_COMMON_SAVE_VERSION 1
506 static void cpu_common_save(QEMUFile *f, void *opaque)
508 CPUState *env = opaque;
510 qemu_put_be32s(f, &env->halted);
511 qemu_put_be32s(f, &env->interrupt_request);
/* Restore the common CPU fields; rejects any snapshot whose version does
   not match exactly. */
514 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
516 CPUState *env = opaque;
518 if (version_id != CPU_COMMON_SAVE_VERSION)
521 qemu_get_be32s(f, &env->halted);
522 qemu_get_be32s(f, &env->interrupt_request);
523 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
524 version_id is increased. */
525 env->interrupt_request &= ~0x01;
/* Register a new CPU: append it to the global first_cpu list, assign the
   next cpu_index, init its breakpoint/watchpoint lists, and hook up savevm
   handlers in system emulation. */
532 void cpu_exec_init(CPUState *env)
537 #if defined(CONFIG_USER_ONLY)
540 env->next_cpu = NULL;
/* walk to the tail of the CPU list, counting indices on the way
   (counter increment elided in extract) */
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
547 env->cpu_index = cpu_index;
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
551 #if defined(CONFIG_USER_ONLY)
554 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
556 cpu_common_save, cpu_common_load, env);
557 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
558 cpu_save, cpu_load, env);
/* Drop a page's SMC code bitmap and reset its write-lookup counter; the
   bitmap will be rebuilt lazily once the threshold is crossed again. */
562 static inline void invalidate_page_bitmap(PageDesc *p)
564 if (p->code_bitmap) {
565 qemu_free(p->code_bitmap);
566 p->code_bitmap = NULL;
568 p->code_write_count = 0;
571 /* set to NULL all the 'first_tb' fields in all PageDescs */
572 static void page_flush_tb(void)
577 for(i = 0; i < L1_SIZE; i++) {
580 for(j = 0; j < L2_SIZE; j++) {
582 invalidate_page_bitmap(p);
589 /* flush all the translation blocks */
590 /* XXX: tb_flush is currently not thread safe */
591 void tb_flush(CPUState *env1)
594 #if defined(DEBUG_FLUSH)
595 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
596 (unsigned long)(code_gen_ptr - code_gen_buffer),
598 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
/* sanity: generated code must never have run past the buffer */
600 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
601 cpu_abort(env1, "Internal error: code buffer overflow\n");
/* clear every CPU's jump cache and the physical hash, then rewind the
   generation pointer so translation restarts from an empty buffer */
605 for(env = first_cpu; env != NULL; env = env->next_cpu) {
606 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
609 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
612 code_gen_ptr = code_gen_buffer;
613 /* XXX: flush processor icache at this point if cache flush is
614    expensive -- NOTE(review): comment tail elided in extract. */
/* Consistency checkers, compiled only with DEBUG_TB_CHECK. */
618 #ifdef DEBUG_TB_CHECK
/* Verify no TB still overlaps a supposedly-invalidated page. */
620 static void tb_invalidate_check(target_ulong address)
622 TranslationBlock *tb;
624 address &= TARGET_PAGE_MASK;
625 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
626 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
627 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
628 address >= tb->pc + tb->size)) {
629 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
630 address, (long)tb->pc, tb->size);
636 /* verify that all the pages have correct rights for code */
637 static void tb_page_check(void)
639 TranslationBlock *tb;
640 int i, flags1, flags2;
642 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
643 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
644 flags1 = page_get_flags(tb->pc);
645 flags2 = page_get_flags(tb->pc + tb->size - 1);
/* a page holding translated code must never be writable */
646 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
647 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
648 (long)tb->pc, tb->size, flags1, flags2);
/* Verify a TB's circular jump list terminates back at the TB itself. */
654 static void tb_jmp_check(TranslationBlock *tb)
656 TranslationBlock *tb1;
659 /* suppress any remaining jumps to this TB */
663 tb1 = (TranslationBlock *)((long)tb1 & ~3);
666 tb1 = tb1->jmp_next[n1];
668 /* check end of list */
670 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
676 /* invalidate one TB */
/* Unlink 'tb' from a singly-linked TB list whose next pointer lives at
   'next_offset' bytes into each TranslationBlock. */
677 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
680 TranslationBlock *tb1;
684 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
687 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
/* Unlink 'tb' from a page's first_tb list.  The low 2 bits of each link
   encode which of the TB's (up to two) pages the entry belongs to. */
691 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
693 TranslationBlock *tb1;
699 tb1 = (TranslationBlock *)((long)tb1 & ~3);
701 *ptb = tb1->page_next[n1];
704 ptb = &tb1->page_next[n1];
/* Remove jump entry 'n' of 'tb' from the circular jmp list it sits on. */
708 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
710 TranslationBlock *tb1, **ptb;
713 ptb = &tb->jmp_next[n];
716 /* find tb(n) in circular list */
720 tb1 = (TranslationBlock *)((long)tb1 & ~3);
721 if (n1 == n && tb1 == tb)
724 ptb = &tb1->jmp_first;
726 ptb = &tb1->jmp_next[n1];
729 /* now we can suppress tb(n) from the list */
730 *ptb = tb->jmp_next[n];
732 tb->jmp_next[n] = NULL;
736 /* reset the jump entry 'n' of a TB so that it is not chained to
737 another TB */
738 static inline void tb_reset_jump(TranslationBlock *tb, int n)
740 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]))
/* Fully retire one TB: unhook it from the physical hash, from both page
   lists, from every CPU's jump cache, and from all jump chains, then mark
   it dead.  'page_addr' names the page already being handled by the caller
   (or -1), so its list is skipped. */
743 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
748 target_phys_addr_t phys_pc;
749 TranslationBlock *tb1, *tb2;
751 /* remove the TB from the hash list */
752 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
753 h = tb_phys_hash_func(phys_pc);
754 tb_remove(&tb_phys_hash[h], tb,
755 offsetof(TranslationBlock, phys_hash_next));
757 /* remove the TB from the page list */
758 if (tb->page_addr[0] != page_addr) {
759 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
763 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
764 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
765 tb_page_remove(&p->first_tb, tb);
766 invalidate_page_bitmap(p);
/* tell cpu_exec the TB lookup tables have changed */
769 tb_invalidated_flag = 1;
771 /* remove the TB from the hash list */
772 h = tb_jmp_cache_hash_func(tb->pc);
773 for(env = first_cpu; env != NULL; env = env->next_cpu) {
774 if (env->tb_jmp_cache[h] == tb)
775 env->tb_jmp_cache[h] = NULL;
778 /* suppress this TB from the two jump lists */
779 tb_jmp_remove(tb, 0);
780 tb_jmp_remove(tb, 1);
782 /* suppress any remaining jumps to this TB */
788 tb1 = (TranslationBlock *)((long)tb1 & ~3);
789 tb2 = tb1->jmp_next[n1];
790 tb_reset_jump(tb1, n1);
791 tb1->jmp_next[n1] = NULL;
794 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
796 tb_phys_invalidate_count++;
/* Set 'len' consecutive bits starting at bit 'start' in bitmap 'tab'.
   Handles the partial first byte, whole middle bytes, and partial last
   byte separately (some lines elided in extract). */
799 static inline void set_bits(uint8_t *tab, int start, int len)
805 mask = 0xff << (start & 7);
806 if ((start & ~7) == (end & ~7)) {
/* start and end fall in the same byte: intersect both edge masks */
808 mask &= ~(0xff << (end & 7));
813 start = (start + 8) & ~7;
815 while (start < end1) {
820 mask = ~(0xff << (end & 7));
/* Build the per-page code bitmap: one bit per byte of the page that is
   covered by some TB, used to filter writes in
   tb_invalidate_phys_page_fast(). */
826 static void build_page_bitmap(PageDesc *p)
828 int n, tb_start, tb_end;
829 TranslationBlock *tb;
831 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
836 tb = (TranslationBlock *)((long)tb & ~3);
837 /* NOTE: this is subtle as a TB may span two physical pages */
839 /* NOTE: tb_end may be after the end of the page, but
840 it is not a problem */
841 tb_start = tb->pc & ~TARGET_PAGE_MASK;
842 tb_end = tb_start + tb->size;
843 if (tb_end > TARGET_PAGE_SIZE)
844 tb_end = TARGET_PAGE_SIZE;
/* TB starts on the previous page: only its tail is on this one */
847 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
849 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
850 tb = tb->page_next[n];
/* Translate one guest basic block at (pc, cs_base, flags) into host code
   and link it into the physical page tables.  Returns the new TB.  If
   tb_alloc fails, the whole cache is flushed first (elided lines). */
854 TranslationBlock *tb_gen_code(CPUState *env,
855 target_ulong pc, target_ulong cs_base,
856 int flags, int cflags)
858 TranslationBlock *tb;
860 target_ulong phys_pc, phys_page2, virt_page2;
863 phys_pc = get_phys_addr_code(env, pc);
866 /* flush must be done */
868 /* cannot fail at this point */
870 /* Don't forget to invalidate previous TB info. */
871 tb_invalidated_flag = 1;
873 tc_ptr = code_gen_ptr;
875 tb->cs_base = cs_base;
878 cpu_gen_code(env, tb, &code_gen_size);
/* bump the generation pointer, keeping CODE_GEN_ALIGN alignment */
879 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
881 /* check next page if needed */
882 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
884 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
885 phys_page2 = get_phys_addr_code(env, virt_page2);
887 tb_link_phys(tb, phys_pc, phys_page2);
891 /* invalidate all TBs which intersect with the target physical page
892 starting in range [start;end[. NOTE: start and end must refer to
893 the same physical page. 'is_cpu_write_access' should be true if called
894 from a real cpu write access: the virtual CPU will exit the current
895 TB if code is modified inside this TB. */
896 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
897 int is_cpu_write_access)
899 TranslationBlock *tb, *tb_next, *saved_tb;
900 CPUState *env = cpu_single_env;
901 target_ulong tb_start, tb_end;
904 #ifdef TARGET_HAS_PRECISE_SMC
905 int current_tb_not_found = is_cpu_write_access;
906 TranslationBlock *current_tb = NULL;
907 int current_tb_modified = 0;
908 target_ulong current_pc = 0;
909 target_ulong current_cs_base = 0;
910 int current_flags = 0;
911 #endif /* TARGET_HAS_PRECISE_SMC */
913 p = page_find(start >> TARGET_PAGE_BITS);
/* once a page has seen enough code writes, keep a bitmap of TB-covered
   bytes so future writes can be filtered cheaply */
916 if (!p->code_bitmap &&
917 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
918 is_cpu_write_access) {
919 /* build code bitmap */
920 build_page_bitmap(p);
923 /* we remove all the TBs in the range [start, end[ */
924 /* XXX: see if in some cases it could be faster to invalidate all the code */
928 tb = (TranslationBlock *)((long)tb & ~3);
929 tb_next = tb->page_next[n];
930 /* NOTE: this is subtle as a TB may span two physical pages */
932 /* NOTE: tb_end may be after the end of the page, but
933 it is not a problem */
934 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
935 tb_end = tb_start + tb->size;
937 tb_start = tb->page_addr[1];
938 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
940 if (!(tb_end <= start || tb_start >= end)) {
941 #ifdef TARGET_HAS_PRECISE_SMC
942 if (current_tb_not_found) {
943 current_tb_not_found = 0;
945 if (env->mem_io_pc) {
946 /* now we have a real cpu fault */
947 current_tb = tb_find_pc(env->mem_io_pc);
950 if (current_tb == tb &&
951 (current_tb->cflags & CF_COUNT_MASK) != 1) {
952 /* If we are modifying the current TB, we must stop
953 its execution. We could be more precise by checking
954 that the modification is after the current PC, but it
955 would require a specialized function to partially
956 restore the CPU state */
958 current_tb_modified = 1;
959 cpu_restore_state(current_tb, env,
960 env->mem_io_pc, NULL);
/* FIX(review): was mojibake "¤t_pc, ¤t_cs_base" — the '&curren'
   of each '&current_...' had been eaten by an HTML-entity-style
   corruption; restored the address-of expressions. */
961 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
964 #endif /* TARGET_HAS_PRECISE_SMC */
965 /* we need to do that to handle the case where a signal
966 occurs while doing tb_phys_invalidate() */
969 saved_tb = env->current_tb;
970 env->current_tb = NULL;
972 tb_phys_invalidate(tb, -1);
974 env->current_tb = saved_tb;
975 if (env->interrupt_request && env->current_tb)
976 cpu_interrupt(env, env->interrupt_request);
981 #if !defined(CONFIG_USER_ONLY)
982 /* if no code remaining, no need to continue to use slow writes */
984 invalidate_page_bitmap(p);
985 if (is_cpu_write_access) {
986 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
990 #ifdef TARGET_HAS_PRECISE_SMC
991 if (current_tb_modified) {
992 /* we generate a block containing just the instruction
993 modifying the memory. It will ensure that it cannot modify
994 itself */
995 env->current_tb = NULL;
996 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
997 cpu_resume_from_signal(env, NULL);
1002 /* len must be <= 8 and start must be a multiple of len */
/* Fast path for small aligned code writes: if the page has a code bitmap
   and none of the written bytes overlap a TB, skip invalidation entirely. */
1003 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1009 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env->mem_io_vaddr, len,
1011 cpu_single_env->eip,
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1015 p = page_find(start >> TARGET_PAGE_BITS);
1018 if (p->code_bitmap) {
1019 offset = start & ~TARGET_PAGE_MASK;
1020 b = p->code_bitmap[offset >> 3] >> (offset & 7);
/* any of the 'len' bits set means translated code is being touched */
1021 if (b & ((1 << len) - 1))
/* slow path: really invalidate the range */
1025 tb_invalidate_phys_page_range(start, start + len, 1);
1029 #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on one page (user-mode SMC path, entered from the
   host SIGSEGV handler).  'pc'/'puc' describe the faulting host context so
   the currently-executing TB can be detected and its CPU state restored. */
1030 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1031 unsigned long pc, void *puc)
1033 TranslationBlock *tb;
1036 #ifdef TARGET_HAS_PRECISE_SMC
1037 TranslationBlock *current_tb = NULL;
1038 CPUState *env = cpu_single_env;
1039 int current_tb_modified = 0;
1040 target_ulong current_pc = 0;
1041 target_ulong current_cs_base = 0;
1042 int current_flags = 0;
1045 addr &= TARGET_PAGE_MASK;
1046 p = page_find(addr >> TARGET_PAGE_BITS);
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (tb && pc != 0) {
1052 current_tb = tb_find_pc(pc);
1055 while (tb != NULL) {
1057 tb = (TranslationBlock *)((long)tb & ~3);
1058 #ifdef TARGET_HAS_PRECISE_SMC
1059 if (current_tb == tb &&
1060 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1061 /* If we are modifying the current TB, we must stop
1062 its execution. We could be more precise by checking
1063 that the modification is after the current PC, but it
1064 would require a specialized function to partially
1065 restore the CPU state */
1067 current_tb_modified = 1;
1068 cpu_restore_state(current_tb, env, pc, puc);
/* FIX(review): was mojibake "¤t_pc, ¤t_cs_base" — restored the
   corrupted '&current_pc' / '&current_cs_base' address-of args. */
1069 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1072 #endif /* TARGET_HAS_PRECISE_SMC */
1073 tb_phys_invalidate(tb, addr);
1074 tb = tb->page_next[n];
1077 #ifdef TARGET_HAS_PRECISE_SMC
1078 if (current_tb_modified) {
1079 /* we generate a block containing just the instruction
1080 modifying the memory. It will ensure that it cannot modify
1081 itself */
1082 env->current_tb = NULL;
1083 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1084 cpu_resume_from_signal(env, puc);
1090 /* add the tb in the target page and protect it if necessary */
/* Link TB 'tb' onto page 'page_addr' as its page slot 'n' (0 or 1; the slot
   is encoded in the low bits of the list pointer), then write-protect the
   page so future code writes are caught. */
1091 static inline void tb_alloc_page(TranslationBlock *tb,
1092 unsigned int n, target_ulong page_addr)
1095 TranslationBlock *last_first_tb;
1097 tb->page_addr[n] = page_addr;
1098 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1099 tb->page_next[n] = p->first_tb;
1100 last_first_tb = p->first_tb;
1101 p->first_tb = (TranslationBlock *)((long)tb | n);
1102 invalidate_page_bitmap(p);
1104 #if defined(TARGET_HAS_SMC) || 1
1106 #if defined(CONFIG_USER_ONLY)
1107 if (p->flags & PAGE_WRITE) {
1112 /* force the host page as non writable (writes will have a
1113 page fault + mprotect overhead) */
1114 page_addr &= qemu_host_page_mask;
/* one host page may hold several target pages: strip PAGE_WRITE from
   each of them (prot accumulation lines elided in extract) */
1116 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1117 addr += TARGET_PAGE_SIZE) {
1119 p2 = page_find (addr >> TARGET_PAGE_BITS);
1123 p2->flags &= ~PAGE_WRITE;
1124 page_get_flags(addr);
1126 mprotect(g2h(page_addr), qemu_host_page_size,
1127 (prot & PAGE_BITS) & ~PAGE_WRITE);
1128 #ifdef DEBUG_TB_INVALIDATE
1129 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
/* system emulation path: TLB-level code protection */
1134 /* if some code is already present, then the pages are already
1135 protected. So we handle the case where only the first TB is
1136 allocated in a physical page */
1137 if (!last_first_tb) {
1138 tlb_protect_code(page_addr);
1142 #endif /* TARGET_HAS_SMC */
1145 /* Allocate a new translation block. Flush the translation buffer if
1146 too many translation blocks or too much generated code. */
1147 TranslationBlock *tb_alloc(target_ulong pc)
1149 TranslationBlock *tb;
/* NULL return (elided) signals the caller to tb_flush() and retry */
1151 if (nb_tbs >= code_gen_max_blocks ||
1152 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1154 tb = &tbs[nb_tbs++];
/* Release a TB.  Only the most recently allocated TB can actually be
   reclaimed; anything else is silently kept until the next tb_flush. */
1160 void tb_free(TranslationBlock *tb)
1162 /* In practice this is mostly used for single use temporary TB
1163 Ignore the hard cases and just back up if this TB happens to
1164 be the last one generated. */
1165 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1166 code_gen_ptr = tb->tc_ptr;
1171 /* add a new TB and link it to the physical page tables. phys_page2 is
1172 (-1) to indicate that only one page contains the TB. */
1173 void tb_link_phys(TranslationBlock *tb,
1174 target_ulong phys_pc, target_ulong phys_page2)
1177 TranslationBlock **ptb;
1179 /* Grab the mmap lock to stop another thread invalidating this TB
1180 before we are done. */
1182 /* add in the physical hash table */
1183 h = tb_phys_hash_func(phys_pc);
1184 ptb = &tb_phys_hash[h];
1185 tb->phys_hash_next = *ptb;
1188 /* add in the page list */
1189 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1190 if (phys_page2 != -1)
1191 tb_alloc_page(tb, 1, phys_page2)
1193 tb->page_addr[1] = -1;
/* jmp_first tagged with 2 == empty circular jump list sentinel */
1195 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1196 tb->jmp_next[0] = NULL;
1197 tb->jmp_next[1] = NULL;
1199 /* init original jump addresses */
1200 if (tb->tb_next_offset[0] != 0xffff)
1201 tb_reset_jump(tb, 0);
1202 if (tb->tb_next_offset[1] != 0xffff)
1203 tb_reset_jump(tb, 1);
1205 #ifdef DEBUG_TB_CHECK
1205 #ifdef DEBUG_TB_CHECK
1211 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212 tb[1].tc_ptr. Return NULL if not found */
1213 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1215 int m_min, m_max, m;
1217 TranslationBlock *tb;
1221 if (tc_ptr < (unsigned long)code_gen_buffer ||
1222 tc_ptr >= (unsigned long)code_gen_ptr)
1224 /* binary search (cf Knuth) */
1227 while (m_min <= m_max) {
1228 m = (m_min + m_max) >> 1;
1230 v = (unsigned long)tb->tc_ptr;
1233 else if (tc_ptr < v) {
1242 static void tb_reset_jump_recursive(TranslationBlock *tb);
/* Break the direct-jump chain leaving 'tb' through slot 'n': unhook tb from
   the destination TB's circular jmp_first list, patch the generated code
   back to the epilogue, and recurse into the destination. */
1244 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1246 TranslationBlock *tb1, *tb_next, **ptb;
1249 tb1 = tb->jmp_next[n];
1251 /* find head of list */
1254 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1257 tb1 = tb1->jmp_next[n1];
1259 /* we are now sure now that tb jumps to tb1 */
1262 /* remove tb from the jmp_first list */
1263 ptb = &tb_next->jmp_first;
1267 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268 if (n1 == n && tb1 == tb)
1270 ptb = &tb1->jmp_next[n1];
1272 *ptb = tb->jmp_next[n];
1273 tb->jmp_next[n] = NULL;
1275 /* suppress the jump to next tb in generated code */
1276 tb_reset_jump(tb, n);
1278 /* suppress jumps in the tb on which we could have jumped */
1279 tb_reset_jump_recursive(tb_next);
/* Break both outgoing jump chains of 'tb'. */
1283 static void tb_reset_jump_recursive(TranslationBlock *tb)
1285 tb_reset_jump_recursive2(tb, 0);
1286 tb_reset_jump_recursive2(tb, 1);
1289 #if defined(TARGET_HAS_ICE)
/* Invalidate the translated code containing guest address 'pc' so the next
   execution retranslates it with the breakpoint in place. */
1290 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1292 target_phys_addr_t addr;
1294 ram_addr_t ram_addr;
1297 addr = cpu_get_phys_page_debug(env, pc);
1298 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1300 pd = IO_MEM_UNASSIGNED;
1302 pd = p->phys_offset;
1304 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
/* one-byte range is enough to catch every TB covering pc */
1305 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1309 /* Add a watchpoint. */
1310 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311 int flags, CPUWatchpoint **watchpoint)
1313 target_ulong len_mask = ~(len - 1);
1316 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1322 wp = qemu_malloc(sizeof(*wp));
1325 wp->len_mask = len_mask;
1328 /* keep all GDB-injected watchpoints in front */
1330 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1332 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1334 tlb_flush_page(env, addr);
1341 /* Remove a specific watchpoint. */
/* Matches on address, length mask and flags; the transient
   BP_WATCHPOINT_HIT bit is ignored when comparing flags. */
1342 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1345     target_ulong len_mask = ~(len - 1);
1348     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1349         if (addr == wp->vaddr && len_mask == wp->len_mask
1350                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1351             cpu_watchpoint_remove_by_ref(env, wp);
1358 /* Remove a specific watchpoint by reference. */
/* Unlinks the watchpoint, flushes its TLB page so accesses stop trapping,
   and frees it.  Caller must not use 'watchpoint' afterwards. */
1359 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1361     TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1363     tlb_flush_page(env, watchpoint->vaddr);
1365     qemu_free(watchpoint);
1368 /* Remove all matching watchpoints. */
/* Removes every watchpoint whose flags intersect 'mask'.  Uses the _SAFE
   iterator because removal frees the current node. */
1369 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1371     CPUWatchpoint *wp, *next;
1373     TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1374         if (wp->flags & mask)
1375             cpu_watchpoint_remove_by_ref(env, wp);
1379 /* Add a breakpoint. */
/* Allocates a CPUBreakpoint, links it into env->breakpoints (GDB-injected
   ones at the head) and invalidates the translated code at 'pc' so the
   breakpoint is honoured.  Compiled out unless TARGET_HAS_ICE. */
1380 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381 CPUBreakpoint **breakpoint)
1383 #if defined(TARGET_HAS_ICE)
1386     bp = qemu_malloc(sizeof(*bp));
1391     /* keep all GDB-injected breakpoints in front */
1393         TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1395         TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1397     breakpoint_invalidate(env, pc);
1407 /* Remove a specific breakpoint. */
/* Matches on exact pc and flags, then delegates to the by-reference
   remover.  No-op (stub) when TARGET_HAS_ICE is not defined. */
1408 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1410 #if defined(TARGET_HAS_ICE)
1413     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1414         if (bp->pc == pc && bp->flags == flags) {
1415             cpu_breakpoint_remove_by_ref(env, bp);
1425 /* Remove a specific breakpoint by reference. */
/* Unlinks the breakpoint, invalidates the translated code at its pc so the
   trap is removed, and frees the structure. */
1426 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1428 #if defined(TARGET_HAS_ICE)
1429     TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1431     breakpoint_invalidate(env, breakpoint->pc);
1433     qemu_free(breakpoint);
1437 /* Remove all matching breakpoints. */
/* Removes every breakpoint whose flags intersect 'mask'; safe iteration
   because each removal frees the current node. */
1438 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1440 #if defined(TARGET_HAS_ICE)
1441     CPUBreakpoint *bp, *next;
1443     TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1444         if (bp->flags & mask)
1445             cpu_breakpoint_remove_by_ref(env, bp);
1450 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1451 CPU loop after each instruction */
/* Only acts when the setting actually changes; under KVM the in-kernel
   guest-debug state is updated instead of (or in addition to) flushing. */
1452 void cpu_single_step(CPUState *env, int enabled)
1454 #if defined(TARGET_HAS_ICE)
1455     if (env->singlestep_enabled != enabled) {
1456         env->singlestep_enabled = enabled;
1458             kvm_update_guest_debug(env, 0);
1460     /* must flush all the translated code to avoid inconsistencies */
1461     /* XXX: only flush what is necessary */
1468 /* enable or disable low levels log */
/* Sets the global loglevel; lazily opens the log file on first use
   (append or truncate depending on log_append) and configures line
   buffering.  A zero loglevel with an open file closes it (tail of the
   function is not visible here). */
1469 void cpu_set_log(int log_flags)
1471     loglevel = log_flags;
1472     if (loglevel && !logfile) {
1473         logfile = fopen(logfilename, log_append ? "a" : "w");
1475             perror(logfilename);
1478 #if !defined(CONFIG_SOFTMMU)
1479         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1481         static char logfile_buf[4096];
1482         setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1485         setvbuf(logfile, NULL, _IOLBF, 0);
1489     if (!loglevel && logfile) {
/* Change the log file name and re-apply the current log level so the new
   file is opened.  NOTE(review): the previous strdup'd logfilename is not
   freed in the visible lines -- small leak on repeated calls; confirm
   against the hidden lines. */
1495 void cpu_set_log_filename(const char *filename)
1497     logfilename = strdup(filename);
1502     cpu_set_log(loglevel);
/* Force the CPU out of its current translation block by unchaining it.
   Guarded by a spinlock because an interrupt/signal may race with the
   executing thread. */
1505 static void cpu_unlink_tb(CPUState *env)
1507 #if defined(USE_NPTL)
1508     /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1509 problem and hope the cpu will stop of its own accord. For userspace
1510 emulation this often isn't actually as bad as it sounds. Often
1511 signals are used primarily to interrupt blocking syscalls. */
1513     TranslationBlock *tb;
1514     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1516     tb = env->current_tb;
1517     /* if the cpu is currently executing code, we must unlink it and
1518 all the potentially executing TB */
1519     if (tb && !testandset(&interrupt_lock)) {
1520         env->current_tb = NULL;
1521         tb_reset_jump_recursive(tb);
1522         resetlock(&interrupt_lock);
1527 /* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on the CPU.  In icount mode the
   decrementer high half is forced to 0xffff so the execution loop exits
   promptly; raising a *new* interrupt from outside an I/O instruction is a
   fatal error there because the instruction count would be wrong. */
1528 void cpu_interrupt(CPUState *env, int mask)
1532     old_mask = env->interrupt_request;
1533     env->interrupt_request |= mask;
1536         env->icount_decr.u16.high = 0xffff;
1537 #ifndef CONFIG_USER_ONLY
1539             && (mask & ~old_mask) != 0) {
1540             cpu_abort(env, "Raised interrupt while not in I/O function");
/* Clear the interrupt bits in 'mask'. */
1548 void cpu_reset_interrupt(CPUState *env, int mask)
1550     env->interrupt_request &= ~mask;
/* Request that the CPU leaves its execution loop as soon as possible. */
1553 void cpu_exit(CPUState *env)
1555     env->exit_request = 1;
/* Table mapping log-mask bits to their command-line names and help text;
   consumed by cpu_str_to_log_mask() below (terminated by a zero mask). */
1559 const CPULogItem cpu_log_items[] = {
1560     { CPU_LOG_TB_OUT_ASM, "out_asm",
1561       "show generated host assembly code for each compiled TB" },
1562     { CPU_LOG_TB_IN_ASM, "in_asm",
1563       "show target assembly code for each compiled TB" },
1564     { CPU_LOG_TB_OP, "op",
1565       "show micro ops for each compiled TB" },
1566     { CPU_LOG_TB_OP_OPT, "op_opt",
1569       "before eflags optimization and "
1571       "after liveness analysis" },
1572     { CPU_LOG_INT, "int",
1573       "show interrupts/exceptions in short format" },
1574     { CPU_LOG_EXEC, "exec",
1575       "show trace before each executed TB (lots of logs)" },
1576     { CPU_LOG_TB_CPU, "cpu",
1577       "show CPU state before block translation" },
1579     { CPU_LOG_PCALL, "pcall",
1580       "show protected mode far calls/returns/exceptions" },
1581     { CPU_LOG_RESET, "cpu_reset",
1582       "show CPU state before CPU resets" },
1585     { CPU_LOG_IOPORT, "ioport",
1586       "show all i/o ports accesses" },
/* Return non-zero iff the first 'n' characters of s1 equal the whole of
   s2 (i.e. s2 has length exactly n and matches). */
1591 static int cmp1(const char *s1, int n, const char *s2)
1593     if (strlen(s2) != n)
1595     return memcmp(s1, s2, n) == 0;
1598 /* takes a comma separated list of log masks. Return 0 if error. */
/* Each comma-separated token is matched against cpu_log_items[].name;
   the special token "all" ORs in every item's mask. */
1599 int cpu_str_to_log_mask(const char *str)
1601     const CPULogItem *item;
1608         p1 = strchr(p, ',');
1611         if(cmp1(p,p1-p,"all")) {
1612             for(item = cpu_log_items; item->mask != 0; item++) {
1616             for(item = cpu_log_items; item->mask != 0; item++) {
1617                 if (cmp1(p, p1 - p, item->name))
/* Fatal error: print the formatted message and full CPU state to stderr
   (and to the log file when logging is enabled, using a second va_list),
   then abort.  x86 targets additionally dump FPU/CC-op state. */
1631 void cpu_abort(CPUState *env, const char *fmt, ...)
1638     fprintf(stderr, "qemu: fatal: ");
1639     vfprintf(stderr, fmt, ap);
1640     fprintf(stderr, "\n");
1642     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1644     cpu_dump_state(env, stderr, fprintf, 0);
1646     if (qemu_log_enabled()) {
1647         qemu_log("qemu: fatal: ");
1648         qemu_log_vprintf(fmt, ap2);
1651         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1653         log_cpu_state(env, 0);
/* Clone a CPU state (used for fork-like semantics): memcpy the whole
   CPUState, then restore the new CPU's chaining/index and re-insert
   break/watchpoints individually so list links are valid. */
1663 CPUState *cpu_copy(CPUState *env)
1665     CPUState *new_env = cpu_init(env->cpu_model_str);
1666     CPUState *next_cpu = new_env->next_cpu;
1667     int cpu_index = new_env->cpu_index;
1668 #if defined(TARGET_HAS_ICE)
1673     memcpy(new_env, env, sizeof(CPUState));
1675     /* Preserve chaining and index. */
1676     new_env->next_cpu = next_cpu;
1677     new_env->cpu_index = cpu_index;
1679     /* Clone all break/watchpoints.
1680 Note: Once we support ptrace with hw-debug register access, make sure
1681 BP_CPU break/watchpoints are handled correctly on clone. */
     /* NOTE(review): the two TAILQ_INITs below empty env's own lists, yet
        the TAILQ_FOREACH loops further down iterate those same (now empty)
        lists -- so no break/watchpoints are actually copied, and the
        source CPU loses its lists' heads.  Looks like the INITs should
        target new_env; confirm against upstream before changing. */
1682     TAILQ_INIT(&env->breakpoints);
1683     TAILQ_INIT(&env->watchpoints);
1684 #if defined(TARGET_HAS_ICE)
1685     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1686         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1688     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1689         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1697 #if !defined(CONFIG_USER_ONLY)
/* Clear the tb_jmp_cache entries that could refer to TBs on the flushed
   page.  Two page-sized runs are cleared: one hashed from the previous
   page (a TB can span a page boundary) and one from the page itself. */
1699 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1703     /* Discard jump cache entries for any tb which might potentially
1704 overlap the flushed page. */
1705     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1706     memset (&env->tb_jmp_cache[i], 0,
1707             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1709     i = tb_jmp_cache_hash_page(addr);
1710     memset (&env->tb_jmp_cache[i], 0,
1711             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1714 /* NOTE: if flush_global is true, also flush global entries (not
/* Invalidate every softmmu TLB entry (all MMU modes) by setting the
   address fields to -1, clear the whole TB jump cache, and forward the
   flush to kqemu when enabled.  MMU modes 2/3 exist only when
   NB_MMU_MODES is large enough. */
1716 void tlb_flush(CPUState *env, int flush_global)
1720 #if defined(DEBUG_TLB)
1721     printf("tlb_flush:\n");
1723     /* must reset current TB so that interrupts cannot modify the
1724 links while we are modifying them */
1725     env->current_tb = NULL;
1727     for(i = 0; i < CPU_TLB_SIZE; i++) {
1728         env->tlb_table[0][i].addr_read = -1;
1729         env->tlb_table[0][i].addr_write = -1;
1730         env->tlb_table[0][i].addr_code = -1;
1731         env->tlb_table[1][i].addr_read = -1;
1732         env->tlb_table[1][i].addr_write = -1;
1733         env->tlb_table[1][i].addr_code = -1;
1734 #if (NB_MMU_MODES >= 3)
1735         env->tlb_table[2][i].addr_read = -1;
1736         env->tlb_table[2][i].addr_write = -1;
1737         env->tlb_table[2][i].addr_code = -1;
1738 #if (NB_MMU_MODES == 4)
1739         env->tlb_table[3][i].addr_read = -1;
1740         env->tlb_table[3][i].addr_write = -1;
1741         env->tlb_table[3][i].addr_code = -1;
1746     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1749     if (env->kqemu_enabled) {
1750         kqemu_flush(env, flush_global);
/* Invalidate one TLB entry iff it maps 'addr' for read, write or code
   access (comparison masks out the per-entry flag bits except
   TLB_INVALID_MASK, so already-invalid entries never match). */
1756 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1758     if (addr == (tlb_entry->addr_read &
1759                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1760         addr == (tlb_entry->addr_write &
1761                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1762         addr == (tlb_entry->addr_code &
1763                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1764         tlb_entry->addr_read = -1;
1765         tlb_entry->addr_write = -1;
1766         tlb_entry->addr_code = -1;
/* Flush the TLB entries for one virtual page in every MMU mode, plus the
   TB jump cache entries covering that page; forwarded to kqemu when
   enabled. */
1770 void tlb_flush_page(CPUState *env, target_ulong addr)
1774 #if defined(DEBUG_TLB)
1775     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1777     /* must reset current TB so that interrupts cannot modify the
1778 links while we are modifying them */
1779     env->current_tb = NULL;
1781     addr &= TARGET_PAGE_MASK;
1782     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1783     tlb_flush_entry(&env->tlb_table[0][i], addr);
1784     tlb_flush_entry(&env->tlb_table[1][i], addr);
1785 #if (NB_MMU_MODES >= 3)
1786     tlb_flush_entry(&env->tlb_table[2][i], addr);
1787 #if (NB_MMU_MODES == 4)
1788     tlb_flush_entry(&env->tlb_table[3][i], addr);
1792     tlb_flush_jmp_cache(env, addr);
1795     if (env->kqemu_enabled) {
1796         kqemu_flush_page(env, addr);
1801 /* update the TLBs so that writes to code in the virtual page 'addr'
/* Mark the page non-dirty so writes trap and can invalidate translated
   code (self-modifying-code protection). */
1803 static void tlb_protect_code(ram_addr_t ram_addr)
1805     cpu_physical_memory_reset_dirty(ram_addr,
1806                                     ram_addr + TARGET_PAGE_SIZE,
1810 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1811 tested for self modifying code */
1812 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1815     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
/* If this TLB entry maps plain RAM inside [start, start+length), set
   TLB_NOTDIRTY on its write address so the next write goes through the
   slow path and re-marks the page dirty. */
1818 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1819                                          unsigned long start, unsigned long length)
1822     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1823         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1824         if ((addr - start) < length) {
1825             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
/* Clear the given dirty flag bits for the physical range [start, end),
   notify kqemu, and patch every CPU's TLB (all MMU modes) so future writes
   to the range trap and set the dirty bits again.  start/end are rounded
   to page boundaries. */
1830 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1834     unsigned long length, start1;
1838     start &= TARGET_PAGE_MASK;
1839     end = TARGET_PAGE_ALIGN(end);
1841     length = end - start;
1844     len = length >> TARGET_PAGE_BITS;
1846     /* XXX: should not depend on cpu context */
1848     if (env->kqemu_enabled) {
1851         for(i = 0; i < len; i++) {
1852             kqemu_set_notdirty(env, addr);
1853             addr += TARGET_PAGE_SIZE;
1857     mask = ~dirty_flags;
1858     p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1859     for(i = 0; i < len; i++)
1862     /* we modify the TLB cache so that the dirty bit will be set again
1863 when accessing the range */
     /* translate the ram range into host addresses, which is what the
        TLB write entries store via 'addend' */
1864     start1 = start + (unsigned long)phys_ram_base;
1865     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1866         for(i = 0; i < CPU_TLB_SIZE; i++)
1867             tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1868         for(i = 0; i < CPU_TLB_SIZE; i++)
1869             tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1870 #if (NB_MMU_MODES >= 3)
1871         for(i = 0; i < CPU_TLB_SIZE; i++)
1872             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1873 #if (NB_MMU_MODES == 4)
1874         for(i = 0; i < CPU_TLB_SIZE; i++)
1875             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
/* Enable/disable dirty-page tracking (used by live migration). */
1881 int cpu_physical_memory_set_dirty_tracking(int enable)
1883     in_migration = enable;
/* Query whether dirty-page tracking is currently enabled. */
1887 int cpu_physical_memory_get_dirty_tracking(void)
1889     return in_migration;
/* Pull the dirty bitmap for [start_addr, end_addr) from KVM. */
1892 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1895     kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
/* Re-derive the TLB_NOTDIRTY flag of a RAM write entry from the current
   dirty bitmap: host address minus phys_ram_base gives the ram address. */
1898 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1900     ram_addr_t ram_addr;
1902     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1903         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1904             tlb_entry->addend - (unsigned long)phys_ram_base;
1905         if (!cpu_physical_memory_is_dirty(ram_addr)) {
1906             tlb_entry->addr_write |= TLB_NOTDIRTY;
1911 /* update the TLB according to the current state of the dirty bits */
/* Walks every entry of every MMU mode and refreshes TLB_NOTDIRTY. */
1912 void cpu_tlb_update_dirty(CPUState *env)
1915     for(i = 0; i < CPU_TLB_SIZE; i++)
1916         tlb_update_dirty(&env->tlb_table[0][i]);
1917     for(i = 0; i < CPU_TLB_SIZE; i++)
1918         tlb_update_dirty(&env->tlb_table[1][i]);
1919 #if (NB_MMU_MODES >= 3)
1920     for(i = 0; i < CPU_TLB_SIZE; i++)
1921         tlb_update_dirty(&env->tlb_table[2][i]);
1922 #if (NB_MMU_MODES == 4)
1923     for(i = 0; i < CPU_TLB_SIZE; i++)
1924         tlb_update_dirty(&env->tlb_table[3][i]);
/* Drop the TLB_NOTDIRTY flag from one entry if it maps 'vaddr', restoring
   fast-path writes now that the page is dirty again. */
1929 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1931     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1932         tlb_entry->addr_write = vaddr;
1935 /* update the TLB corresponding to virtual page vaddr
1936 so that it is no longer dirty */
/* Applies tlb_set_dirty1 to the page's slot in every MMU mode. */
1937 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1941     vaddr &= TARGET_PAGE_MASK;
1942     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943     tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1944     tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1945 #if (NB_MMU_MODES >= 3)
1946     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1947 #if (NB_MMU_MODES == 4)
1948     tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1953 /* add a new TLB entry. At most one entry for a given virtual address
1954 is permitted. Return 0 if OK or 2 if the page could not be mapped
1955 (can only happen in non SOFTMMU mode for I/O pages or pages
1956 conflicting with the host address space). */
1957 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958                       target_phys_addr_t paddr, int prot,
1959                       int mmu_idx, int is_softmmu)
1964     target_ulong address;
1965     target_ulong code_address;
1966     target_phys_addr_t addend;
1970     target_phys_addr_t iotlb;
     /* look up the physical page descriptor; unmapped -> unassigned I/O */
1972     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1974         pd = IO_MEM_UNASSIGNED;
1976         pd = p->phys_offset;
1978 #if defined(DEBUG_TLB)
1979     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1980            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
     /* anything above IO_MEM_ROM that is not ROMD goes through I/O handlers */
1985     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1986         /* IO memory case (romd handled later) */
1987         address |= TLB_MMIO;
1989     addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1990     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
     /* normal RAM/ROM: iotlb holds the ram address plus the slow-path type */
1992         iotlb = pd & TARGET_PAGE_MASK;
1993         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1994             iotlb |= IO_MEM_NOTDIRTY;
1996             iotlb |= IO_MEM_ROM;
1998         /* IO handlers are currently passed a physical address.
1999 It would be nice to pass an offset from the base address
2000 of that region. This would avoid having to special case RAM,
2001 and avoid full address decoding in every device.
2002 We can't use the high bits of pd for this because
2003 IO_MEM_ROMD uses these as a ram address. */
2004         iotlb = (pd & ~TARGET_PAGE_MASK);
2006             iotlb += p->region_offset;
2012     code_address = address;
2013     /* Make accesses to pages with watchpoints go via the
2014 watchpoint trap routines. */
2015     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2016         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2017             iotlb = io_mem_watch + paddr;
2018             /* TODO: The memory case can be optimized by not trapping
2019 reads of pages with a write breakpoint. */
2020             address |= TLB_MMIO;
     /* fill the TLB slot; iotlb/addend are stored relative to vaddr so the
        fast path can add them directly to the guest address */
2024     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2025     env->iotlb[mmu_idx][index] = iotlb - vaddr;
2026     te = &env->tlb_table[mmu_idx][index];
2027     te->addend = addend - vaddr;
2028     if (prot & PAGE_READ) {
2029         te->addr_read = address;
2034     if (prot & PAGE_EXEC) {
2035         te->addr_code = code_address;
2039     if (prot & PAGE_WRITE) {
2040         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2041             (pd & IO_MEM_ROMD)) {
2042             /* Write access calls the I/O callback. */
2043             te->addr_write = address | TLB_MMIO;
2044         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2045                    !cpu_physical_memory_is_dirty(pd)) {
2046             te->addr_write = address | TLB_NOTDIRTY;
2048             te->addr_write = address;
2051         te->addr_write = -1;
/* CONFIG_USER_ONLY stubs: there is no softmmu TLB in user-mode emulation,
   so the flush entry points do nothing and page mapping trivially succeeds. */
2058 void tlb_flush(CPUState *env, int flush_global)
2062 void tlb_flush_page(CPUState *env, target_ulong addr)
2066 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2067                       target_phys_addr_t paddr, int prot,
2068                       int mmu_idx, int is_softmmu)
/* Walk the two-level l1_map page table and invoke 'fn' once per maximal
   run of pages sharing the same protection flags; a non-zero return from
   'fn' stops the walk.  'priv' is passed through opaquely. */
2073 void walk_memory_regions(void *priv,
2074     int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2076     unsigned long start, end;
2078     int i, j, prot, prot1;
     /* i == L1_SIZE is one extra iteration to flush the final region */
2083     for (i = 0; i <= L1_SIZE; i++) {
2084         p = (i < L1_SIZE) ? l1_map[i] : NULL;
2085         for (j = 0; j < L2_SIZE; j++) {
2086             prot1 = (p == NULL) ? 0 : p[j].flags;
2088  * "region" is one continuous chunk of memory
2089  * that has same protection flags set.
2091             if (prot1 != prot) {
2092                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2094                     if ((*fn)(priv, start, end, prot) != 0)
2108  ; /* null statement to make compiler happy */
/* walk_memory_regions() callback: print one region as
   "start-end size rwx" to the FILE passed as 'priv'. */
2111 static int dump_region(void *priv, unsigned long start,
2112     unsigned long end, unsigned long prot)
2114     FILE *f = (FILE *)priv;
2116     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2117         start, end, end - start,
2118         ((prot & PAGE_READ) ? 'r' : '-'),
2119         ((prot & PAGE_WRITE) ? 'w' : '-'),
2120         ((prot & PAGE_EXEC) ? 'x' : '-'));
2125 /* dump memory mappings */
/* Print a header line then one line per region via dump_region(). */
2126 void page_dump(FILE *f)
2128     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2129         "start", "end", "size", "prot");
2130     walk_memory_regions(f, dump_region);
/* Return the protection flags of the page containing 'address'
   (0 when the page has no descriptor -- return path not visible here). */
2133 int page_get_flags(target_ulong address)
2137     p = page_find(address >> TARGET_PAGE_BITS);
2143 /* modify the flags of a page and invalidate the code if
2144 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2145 depending on PAGE_WRITE */
2146 void page_set_flags(target_ulong start, target_ulong end, int flags)
2151     /* mmap_lock should already be held. */
2152     start = start & TARGET_PAGE_MASK;
2153     end = TARGET_PAGE_ALIGN(end);
2154     if (flags & PAGE_WRITE)
2155         flags |= PAGE_WRITE_ORG;
2156     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2157         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2158         /* We may be called for host regions that are outside guest
     /* a page turning writable that was read-only may hold translated
        code which must be discarded */
2162         /* if the write protection is set, then we invalidate the code
2164         if (!(p->flags & PAGE_WRITE) &&
2165             (flags & PAGE_WRITE) &&
2167             tb_invalidate_phys_page(addr, 0, NULL);
/* Verify that every page in [start, start+len) is valid and grants the
   requested 'flags'; for writes, pages write-protected only to guard
   translated code are transparently unprotected.  Non-zero/negative
   return paths are in lines not visible here. */
2173 int page_check_range(target_ulong start, target_ulong len, int flags)
2179     if (start + len < start)
2180         /* we've wrapped around */
2183     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2184     start = start & TARGET_PAGE_MASK;
2186     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2187         p = page_find(addr >> TARGET_PAGE_BITS);
2190         if( !(p->flags & PAGE_VALID) )
2193         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2195         if (flags & PAGE_WRITE) {
2196             if (!(p->flags & PAGE_WRITE_ORG))
2198             /* unprotect the page if it was put read-only because it
2199 contains translated code */
2200             if (!(p->flags & PAGE_WRITE)) {
2201                 if (!page_unprotect(addr, 0, NULL))
2210 /* called from signal handler: invalidate the code and unprotect the
2211 page. Return TRUE if the fault was successfully handled. */
/* Works at host-page granularity: if any guest page in the host page was
   originally writable (PAGE_WRITE_ORG), mprotect() the host page back to
   writable and invalidate the translated code derived from it. */
2212 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2214     unsigned int page_index, prot, pindex;
2216     target_ulong host_start, host_end, addr;
2218     /* Technically this isn't safe inside a signal handler. However we
2219 know this only ever happens in a synchronous SEGV handler, so in
2220 practice it seems to be ok. */
2223     host_start = address & qemu_host_page_mask;
2224     page_index = host_start >> TARGET_PAGE_BITS;
2225     p1 = page_find(page_index);
2230     host_end = host_start + qemu_host_page_size;
2233     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2237     /* if the page was really writable, then we change its
2238 protection back to writable */
2239     if (prot & PAGE_WRITE_ORG) {
2240         pindex = (address - host_start) >> TARGET_PAGE_BITS;
2241         if (!(p1[pindex].flags & PAGE_WRITE)) {
2242             mprotect((void *)g2h(host_start), qemu_host_page_size,
2243                      (prot & PAGE_BITS) | PAGE_WRITE);
2244             p1[pindex].flags |= PAGE_WRITE;
2245             /* and since the content will be modified, we must invalidate
2246 the corresponding translated code. */
2247             tb_invalidate_phys_page(address, pc, puc);
2248 #ifdef DEBUG_TB_CHECK
2249             tb_invalidate_check(address);
/* User-mode stub: no softmmu TLB, so there is nothing to mark dirty. */
2259 static inline void tlb_set_dirty(CPUState *env,
2260                                  unsigned long addr, target_ulong vaddr)
2263 #endif /* defined(CONFIG_USER_ONLY) */
2265 #if !defined(CONFIG_USER_ONLY)
2267 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2268                              ram_addr_t memory, ram_addr_t region_offset);
2269 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2270                            ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the sub-page range [start_addr2, end_addr2] of the page at
   'addr' actually covered by the registration [start_addr,
   start_addr+orig_size), and set need_subpage when that range is not the
   whole page. */
2271 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2274     if (addr > start_addr) \
2277         start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2278         if (start_addr2 > 0) \
2282     if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2283         end_addr2 = TARGET_PAGE_SIZE - 1; \
2285         end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2286         if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2291 /* register physical memory. 'size' must be a multiple of the target
2292 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2293 io memory page. The address used when calling the IO function is
2294 the offset from the start of the region, plus region_offset. Both
2295 start_region and region_offset are rounded down to a page boundary
2296 before calculating this offset. This should not be a problem unless
2297 the low bits of start_addr and region_offset differ. */
2298 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2300                                          ram_addr_t phys_offset,
2301                                          ram_addr_t region_offset)
2303     target_phys_addr_t addr, end_addr;
2306     ram_addr_t orig_size = size;
2310     /* XXX: should not depend on cpu context */
2312     if (env->kqemu_enabled) {
2313         kqemu_set_phys_mem(start_addr, size, phys_offset);
2317         kvm_set_phys_mem(start_addr, size, phys_offset);
2319     if (phys_offset == IO_MEM_UNASSIGNED) {
2320         region_offset = start_addr;
2322     region_offset &= TARGET_PAGE_MASK;
2323     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2324     end_addr = start_addr + (target_phys_addr_t)size;
2325     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2326         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2327         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
     /* page already mapped: may need to split it into a subpage
        container when the new registration covers it only partially */
2328             ram_addr_t orig_memory = p->phys_offset;
2329             target_phys_addr_t start_addr2, end_addr2;
2330             int need_subpage = 0;
2332             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2334             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2335                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2336                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2337                                            &p->phys_offset, orig_memory,
2340                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2343                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2345                 p->region_offset = 0;
2347                 p->phys_offset = phys_offset;
     /* RAM/ROM pages advance the backing offset page by page */
2348                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2349                     (phys_offset & IO_MEM_ROMD))
2350                     phys_offset += TARGET_PAGE_SIZE;
2353             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2354             p->phys_offset = phys_offset;
2355             p->region_offset = region_offset;
2356             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2357                 (phys_offset & IO_MEM_ROMD)) {
2358                 phys_offset += TARGET_PAGE_SIZE;
2360                 target_phys_addr_t start_addr2, end_addr2;
2361                 int need_subpage = 0;
2363                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2364                               end_addr2, need_subpage);
2366                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2367                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2368                                            &p->phys_offset, IO_MEM_UNASSIGNED,
2369                                            addr & TARGET_PAGE_MASK);
2370                     subpage_register(subpage, start_addr2, end_addr2,
2371                                      phys_offset, region_offset);
2372                     p->region_offset = 0;
2376         region_offset += TARGET_PAGE_SIZE;
2379     /* since each CPU stores ram addresses in its TLB cache, we must
2380 reset the modified entries */
2382     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2387 /* XXX: temporary until new memory mapping API */
/* Return the phys_offset descriptor for the page containing 'addr',
   or IO_MEM_UNASSIGNED when the page is unmapped. */
2388 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2392     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2394         return IO_MEM_UNASSIGNED;
2395     return p->phys_offset;
/* Forward coalesced-MMIO region registration/deregistration to KVM. */
2398 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401         kvm_coalesce_mmio_region(addr, size);
2404 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407         kvm_uncoalesce_mmio_region(addr, size);
2410 /* XXX: better than nothing */
/* Bump-allocate 'size' bytes from the preallocated guest RAM arena; the
   allocation pointer is page-aligned after each allocation.  Fails hard
   when the arena is exhausted. */
2411 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2414     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2415         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2416                 (uint64_t)size, (uint64_t)phys_ram_size);
2419     addr = phys_ram_alloc_offset;
2420     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
/* Bump allocator cannot reclaim memory: freeing is a no-op. */
2424 void qemu_ram_free(ram_addr_t addr)
/* Access handlers for unassigned physical memory.  Reads return a dummy
   value, writes are ignored; SPARC targets additionally raise the
   architectural unassigned-access trap with the access size (1/2/4) and
   direction (read=0 / write=1). */
2428 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2430 #ifdef DEBUG_UNASSIGNED
2431     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2433 #if defined(TARGET_SPARC)
2434     do_unassigned_access(addr, 0, 0, 0, 1);
2439 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2441 #ifdef DEBUG_UNASSIGNED
2442     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2444 #if defined(TARGET_SPARC)
2445     do_unassigned_access(addr, 0, 0, 0, 2);
2450 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2452 #ifdef DEBUG_UNASSIGNED
2453     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2455 #if defined(TARGET_SPARC)
2456     do_unassigned_access(addr, 0, 0, 0, 4);
2461 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2463 #ifdef DEBUG_UNASSIGNED
2464     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2466 #if defined(TARGET_SPARC)
2467     do_unassigned_access(addr, 1, 0, 0, 1);
2471 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2473 #ifdef DEBUG_UNASSIGNED
2474     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2476 #if defined(TARGET_SPARC)
2477     do_unassigned_access(addr, 1, 0, 0, 2);
2481 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2483 #ifdef DEBUG_UNASSIGNED
2484     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2486 #if defined(TARGET_SPARC)
2487     do_unassigned_access(addr, 1, 0, 0, 4);
/* Dispatch tables indexed by access-size shift (0=byte, 1=word, 2=long). */
2491 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2492     unassigned_mem_readb,
2493     unassigned_mem_readw,
2494     unassigned_mem_readl,
2497 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2498     unassigned_mem_writeb,
2499     unassigned_mem_writew,
2500     unassigned_mem_writel,
/* Slow-path write handlers for pages flagged TLB_NOTDIRTY: invalidate any
   translated code on the page, perform the store into guest RAM, set all
   dirty bits except CODE_DIRTY_FLAG (and tell kqemu), and -- once the page
   is fully dirty -- restore the fast write path via tlb_set_dirty().  The
   three variants differ only in store width (1/2/4 bytes). */
2503 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2507     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2508     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2509 #if !defined(CONFIG_USER_ONLY)
2510         tb_invalidate_phys_page_fast(ram_addr, 1);
2511         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2514     stb_p(phys_ram_base + ram_addr, val);
2516     if (cpu_single_env->kqemu_enabled &&
2517         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2518         kqemu_modify_page(cpu_single_env, ram_addr);
2520     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2521     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2522     /* we remove the notdirty callback only if the code has been
2524     if (dirty_flags == 0xff)
2525         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2528 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2532     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2533     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2534 #if !defined(CONFIG_USER_ONLY)
2535         tb_invalidate_phys_page_fast(ram_addr, 2);
2536         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2539     stw_p(phys_ram_base + ram_addr, val);
2541     if (cpu_single_env->kqemu_enabled &&
2542         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2543         kqemu_modify_page(cpu_single_env, ram_addr);
2545     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2546     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2547     /* we remove the notdirty callback only if the code has been
2549     if (dirty_flags == 0xff)
2550         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2553 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2557     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2558     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2559 #if !defined(CONFIG_USER_ONLY)
2560         tb_invalidate_phys_page_fast(ram_addr, 4);
2561         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2564     stl_p(phys_ram_base + ram_addr, val);
2566     if (cpu_single_env->kqemu_enabled &&
2567         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2568         kqemu_modify_page(cpu_single_env, ram_addr);
2570     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2571     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2572     /* we remove the notdirty callback only if the code has been
2574     if (dirty_flags == 0xff)
2575         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
/* Reads never go through the notdirty path, so the read table is a
   placeholder of NULLs. */
2578 static CPUReadMemoryFunc *error_mem_read[3] = {
2579     NULL, /* never used */
2580     NULL, /* never used */
2581     NULL, /* never used */
/* Write dispatch table indexed by access-size shift. */
2584 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2585     notdirty_mem_writeb,
2586     notdirty_mem_writew,
2587     notdirty_mem_writel,
2590 /* Generate a debug exception if a watchpoint has been hit. */
/* Called from the watch_mem_* access handlers.  Reconstructs the faulting
   guest virtual address from mem_io_vaddr + offset, finds a matching
   watchpoint, marks it hit, and either stops before the access
   (EXCP_DEBUG via cpu_loop_exit, hidden lines) or retranslates the
   current TB with singlestep so the access completes first. */
2591 static void check_watchpoint(int offset, int len_mask, int flags)
2593     CPUState *env = cpu_single_env;
2594     target_ulong pc, cs_base;
2595     TranslationBlock *tb;
2600     if (env->watchpoint_hit) {
2601         /* We re-entered the check after replacing the TB. Now raise
2602  * the debug interrupt so that it will trigger after the
2603  * current instruction. */
2604         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2607     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2608     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2609         if ((vaddr == (wp->vaddr & len_mask) ||
2610              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2611             wp->flags |= BP_WATCHPOINT_HIT;
2612             if (!env->watchpoint_hit) {
2613                 env->watchpoint_hit = wp;
2614                 tb = tb_find_pc(env->mem_io_pc);
2616                     cpu_abort(env, "check_watchpoint: could not find TB for "
2617                               "pc=%p", (void *)env->mem_io_pc);
2619                 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2620                 tb_phys_invalidate(tb, -1);
2621                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2622                     env->exception_index = EXCP_DEBUG;
2624                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2625                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2627                     cpu_resume_from_signal(env, NULL);
2630                 wp->flags &= ~BP_WATCHPOINT_HIT;
2635 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2636 so these check for a hit then pass through to the normal out-of-line
/* Each handler first runs check_watchpoint() with a length mask matching
   the access size (~0x0 byte, ~0x1 word, ~0x3 long), then performs the
   real access through the generic physical-memory helpers. */
2638 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2640     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2641     return ldub_phys(addr);
2644 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2646     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2647     return lduw_phys(addr);
2650 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2652     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2653     return ldl_phys(addr);
2656 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2659     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2660     stb_phys(addr, val);
2663 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2666     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2667     stw_phys(addr, val);
2670 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2673     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2674     stl_phys(addr, val);
/* Dispatch tables (entries on hidden lines) indexed by size shift. */
2677 static CPUReadMemoryFunc *watch_mem_read[3] = {
2683 static CPUWriteMemoryFunc *watch_mem_write[3] = {
/* Read 2^len bytes from a subpage region: look up the handler registered
   for the sub-page index of addr and forward to it, adding the
   registered region offset.  len: 0=byte, 1=word, 2=long. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
    /* Middle index 0 of opaque/region_offset = read side. */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

/* Write counterpart of subpage_readlen (middle index 1 = write side). */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
/* Fixed-size wrappers used to populate the subpage dispatch tables;
   each forwards to subpage_readlen/subpage_writelen with its size. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 0);

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 0);

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 1);

static void subpage_writew (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 1);

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 2);

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 2);

/* Size-indexed tables passed to cpu_register_io_memory by subpage_init. */
static CPUReadMemoryFunc *subpage_read[] = {

static CPUWriteMemoryFunc *subpage_write[] = {
/* Register handlers for the page-relative byte range [start, end] of a
   subpage.  memory is an IO_MEM_*-encoded value whose handlers are
   copied in; region_offset is added to forwarded addresses.
   Returns -1 if the range falls outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
    /* Strip the IO_MEM encoding down to a plain table index. */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* i iterates over access sizes; copy only handlers the target
           region actually implements (NULL entries are left alone). */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
/* Allocate and register a new subpage structure for the page at base.
   *phys receives the new IO index tagged with IO_MEM_SUBPAGE; the whole
   page is initially backed by orig_memory/region_offset. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
    mmio = qemu_mallocz(sizeof(subpage_t));

    /* io_index 0 requests allocation of a fresh IO slot. */
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2841 static int get_free_io_mem_idx(void)
2845 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2846 if (!io_mem_used[i]) {
2850 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
/* Register the built-in IO slots (ROM, unassigned, notdirty, watch) and
   allocate the per-page dirty-flag array. */
static void io_mem_init(void)
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    /* Dynamically allocated slot used for watchpoint-trapping pages. */
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array: one flag byte per RAM page, initially
       all-dirty (0xff). */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
    /* Explicit io_index out of table range: reject. */
    if (io_index >= IO_MEM_NB_ENTRIES)

    for(i = 0;i < 3; i++) {
        /* Any missing handler width marks the region subwidth so such
           accesses get special handling. */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    io_mem_opaque[io_index] = opaque;
    /* Encode index and subwidth flag into one value. */
    return (io_index << IO_MEM_SHIFT) | subwidth;
/* Release an IO slot previously returned by cpu_register_io_memory():
   reset its handlers to the unassigned defaults and mark it free. */
void cpu_unregister_io_memory(int io_table_address)
    /* The argument is the encoded return value, so decode the index. */
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
2918 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2920 return io_mem_write[io_index >> IO_MEM_SHIFT];
2923 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2925 return io_mem_read[io_index >> IO_MEM_SHIFT];
2928 #endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
/* User-mode variant: guest "physical" addresses are host virtual, so
   accesses go through page flags and lock_user rather than softmmu. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page starting at addr. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            /* Write path: the page must be writable. */
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
            unlock_user(p, addr, l);
            /* Read path: the page must be readable. */
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
            unlock_user(p, addr, 0);
/* Softmmu variant: walk the physical page table and dispatch each
   page-sized chunk either to an IO handler or directly to host RAM. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page starting at addr. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            /* Unmapped page: route to the unassigned region. */
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        /* --- write path --- */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* IO region: issue the widest naturally-aligned access. */
            target_phys_addr_t addr1 = addr;
            io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
            /* XXX: could force cpu_single_env to NULL to avoid
               potential bugs */
            if (l >= 4 && ((addr1 & 3) == 0)) {
                /* 32 bit write access */
                io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
            } else if (l >= 2 && ((addr1 & 1) == 0)) {
                /* 16 bit write access */
                io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                /* 8 bit write access */
                io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
            /* RAM: plain host memcpy plus dirty-bit maintenance. */
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bits (CODE_DIRTY_FLAG is managed by the
                   invalidate above) */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        /* --- read path --- */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* I/O case (and ROM without ROMD backing). */
            target_phys_addr_t addr1 = addr;
            io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
            if (l >= 4 && ((addr1 & 3) == 0)) {
                /* 32 bit read access */
                val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
            } else if (l >= 2 && ((addr1 & 1) == 0)) {
                /* 16 bit read access */
                val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                /* 8 bit read access */
                val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
            /* RAM/ROM: direct copy from host memory. */
            ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                (addr & ~TARGET_PAGE_MASK);
            memcpy(buf, ptr, l);
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page starting at addr. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        /* Only RAM, ROM and ROMD pages are writable here; other (IO)
           regions are skipped. */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM: write directly through the host pointer. */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
    /* Guest physical address and length of the region currently staged
       in the bounce buffer (used by cpu_physical_memory_map/unmap). */
    target_phys_addr_t addr;
    target_phys_addr_t len;
/* Single global bounce buffer: only one MMIO mapping may be live. */
static BounceBuffer bounce;

/* Callback registered to be notified when the bounce buffer frees up. */
typedef struct MapClient {
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
/* Register a callback to be invoked when the bounce buffer becomes
   available again; returns an opaque handle for unregistration. */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);

/* Remove a client previously added by cpu_register_map_client(). */
void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);

/* Invoke and drop every registered client; called when the bounce
   buffer is released. */
static void cpu_notify_map_clients(void)
    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    uint8_t *ret = NULL;           /* host start address of the mapping */
    target_phys_addr_t page;
    unsigned long addr1;

        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page starting at addr. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not RAM: use the single global bounce buffer; stop if it
               is already busy or we have mapped direct RAM bytes. */
            if (done || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                /* Read mapping: pre-fill the buffer from guest memory. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            ptr = bounce.buffer;
            /* RAM: map directly into the host RAM block. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        } else if (ret + done != ptr) {
            /* Host addresses went discontiguous: stop extending. */
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: walk the range page by page updating
           dirty state. */
        unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
        while (access_len) {
            l = TARGET_PAGE_SIZE;
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
    /* Bounce-buffer path: flush the written bytes back to the guest,
       release the buffer and wake any waiting map clients. */
    cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    /* I/O (and ROM without ROMD backing): go through the handler. */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        /* RAM case: read through the host pointer. */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: a 64-bit read is issued as two 32-bit reads, with
           the high word first on big-endian targets. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        /* RAM case: read through the host pointer. */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* Byte load: no alignment or endianness concerns, so use the generic
   slow path. */
uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

/* 16-bit load via the slow path, swapped to host byte order. */
uint32_t lduw_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the registered 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        /* RAM case: write through the host pointer. */
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        /* During migration the dirty bitmap must still be kept accurate
           so the page is re-sent to the destination. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
/* 64-bit counterpart of stl_phys_notdirty: no dirty marking or code
   invalidation; IO writes are split into two 32-bit writes, high word
   first on big-endian targets. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
        /* RAM case: write through the host pointer. */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the registered 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        /* RAM case: write via host pointer, then maintain dirty state
           (unlike stl_phys_notdirty). */
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bits (CODE_DIRTY_FLAG is handled above) */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
            (0xff & ~CODE_DIRTY_FLAG);
/* Byte store via the generic slow path (no alignment constraints). */
void stb_phys(target_phys_addr_t addr, uint32_t val)
    /* NOTE(review): `v` is a uint8_t temporary holding val; its
       declaration is elided in this excerpt — confirm upstream. */
    cpu_physical_memory_write(addr, &v, 1);
3453 void stw_phys(target_phys_addr_t addr, uint32_t val)
3455 uint16_t v = tswap16(val);
3456 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
/* 64-bit store via the generic slow path. */
void stq_phys(target_phys_addr_t addr, uint64_t val)
    /* NOTE(review): the target-order byte swap of val (tswap64) is not
       visible in this excerpt — confirm it precedes this write. */
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        /* Translate through the guest MMU without side effects. */
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        /* Bytes remaining on this page starting at addr. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        /* retaddr must lie inside generated code. */
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    /* Total insns in this TB = remaining icount budget + TB's count. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up over the branch and credit one insn to the budget. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Force the new TB to contain exactly n insns, with I/O allowed on
       the last one. */
    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  Does not return: longjmps to the cpu loop. */
    cpu_resume_from_signal(env, NULL);
/* Print translation-cache statistics (TB counts/sizes, cross-page TBs,
   direct-jump chaining ratios, flush counters) through cpu_fprintf. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* TBs whose guest code spans two pages. */
        if (tb->page_addr[1] != -1)
        /* 0xffff marks "no direct jump patched" for a slot. */
        if (tb->tb_next_offset[0] != 0xffff) {
        if (tb->tb_next_offset[1] != 0xffff) {
            direct_jmp2_count++;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
3606 #if !defined(CONFIG_USER_ONLY)
3608 #define MMUSUFFIX _cmmu
3609 #define GETPC() NULL
3610 #define env cpu_single_env
3611 #define SOFTMMU_CODE_ACCESS
3614 #include "softmmu_template.h"
3617 #include "softmmu_template.h"
3620 #include "softmmu_template.h"
3623 #include "softmmu_template.h"