2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include <sys/types.h>
37 #include "qemu-common.h"
42 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93 section close to the code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
98 #define code_gen_section \
99 __attribute__((aligned (32)))
102 uint8_t code_gen_prologue[1024] code_gen_section;
103 static uint8_t *code_gen_buffer;
104 static unsigned long code_gen_buffer_size;
105 /* threshold to flush the translated code buffer */
106 static unsigned long code_gen_buffer_max_size;
107 uint8_t *code_gen_ptr;
109 #if !defined(CONFIG_USER_ONLY)
111 uint8_t *phys_ram_dirty;
112 static int in_migration;
114 typedef struct RAMBlock {
118 struct RAMBlock *next;
121 static RAMBlock *ram_blocks;
122 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123 then we can no longer assume contiguous ram offsets, and external uses
124 of this variable will break. */
125 ram_addr_t last_ram_offset;
129 /* current CPU in the current thread. It is only valid inside cpu_exec() */
131 CPUState *cpu_single_env;
132 /* 0 = Do not count executed instructions.
133 1 = Precise instruction counting.
134 2 = Adaptive rate instruction counting. */
136 /* Current instruction counter. While executing translated code this may
137 include some instructions that have not yet been executed. */
140 typedef struct PageDesc {
141 /* list of TBs intersecting this ram page */
142 TranslationBlock *first_tb;
143 /* in order to optimize self-modifying code handling, we count the
144 number of write accesses to a given page; past a threshold we build a bitmap */
145 unsigned int code_write_count;
146 uint8_t *code_bitmap;
147 #if defined(CONFIG_USER_ONLY)
152 typedef struct PhysPageDesc {
153 /* offset in host memory of the page + io_index in the low bits */
154 ram_addr_t phys_offset;
155 ram_addr_t region_offset;
159 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160 /* XXX: this is a temporary hack for alpha target.
161 * In the future, this is to be replaced by a multi-level table
162 * to actually be able to handle the complete 64-bit address space. */
164 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #define L1_SIZE (1 << L1_BITS)
170 #define L2_SIZE (1 << L2_BITS)
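/* Example of the resulting two-level lookup, with illustrative values
   assuming a 32-bit target, TARGET_PAGE_BITS == 12 and L2_BITS == 10
   (so L1_BITS == 10):

     target_ulong addr  = 0x08049123;
     target_ulong index = addr >> TARGET_PAGE_BITS;    index == 0x08049
     l1_map slot   : index >> L2_BITS         == 0x20
     l2 table slot : index & (L2_SIZE - 1)    == 0x49
     page offset   : addr & ~TARGET_PAGE_MASK == 0x123
*/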
172 unsigned long qemu_real_host_page_size;
173 unsigned long qemu_host_page_bits;
174 unsigned long qemu_host_page_size;
175 unsigned long qemu_host_page_mask;
177 /* XXX: for system emulation, it could just be an array */
178 static PageDesc *l1_map[L1_SIZE];
179 static PhysPageDesc **l1_phys_map;
181 #if !defined(CONFIG_USER_ONLY)
182 static void io_mem_init(void);
184 /* io memory support */
185 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188 static char io_mem_used[IO_MEM_NB_ENTRIES];
189 static int io_mem_watch;
194 static const char *logfilename = "qemu.log";
196 static const char *logfilename = "/tmp/qemu.log";
200 static int log_append = 0;
203 static int tlb_flush_count;
204 static int tb_flush_count;
205 static int tb_phys_invalidate_count;
207 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
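/* e.g. with TARGET_PAGE_SIZE == 0x1000 (an illustrative assumption),
   SUBPAGE_IDX(0x12345) == 0x345: the offset of an access within its
   page, used to index the per-page handler tables below. */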
208 typedef struct subpage_t {
209 target_phys_addr_t base;
210 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
211 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
212 void *opaque[TARGET_PAGE_SIZE][2][4];
213 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
217 static void map_exec(void *addr, long size)
220 VirtualProtect(addr, size,
221 PAGE_EXECUTE_READWRITE, &old_protect);
225 static void map_exec(void *addr, long size)
227 unsigned long start, end, page_size;
229 page_size = getpagesize();
230 start = (unsigned long)addr;
231 start &= ~(page_size - 1);
233 end = (unsigned long)addr + size;
234 end += page_size - 1;
235 end &= ~(page_size - 1);
237 mprotect((void *)start, end - start,
238 PROT_READ | PROT_WRITE | PROT_EXEC);
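/* Worked example of the alignment above, assuming a 4 KB host page:
   for addr == 0x4005a3 and size == 0x100,
     start == 0x4005a3 & ~0xfff           == 0x400000
     end   == (0x4006a3 + 0xfff) & ~0xfff == 0x401000
   so mprotect() covers exactly the host pages spanned by the range. */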
242 static void page_init(void)
244 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
248 SYSTEM_INFO system_info;
250 GetSystemInfo(&system_info);
251 qemu_real_host_page_size = system_info.dwPageSize;
254 qemu_real_host_page_size = getpagesize();
256 if (qemu_host_page_size == 0)
257 qemu_host_page_size = qemu_real_host_page_size;
258 if (qemu_host_page_size < TARGET_PAGE_SIZE)
259 qemu_host_page_size = TARGET_PAGE_SIZE;
260 qemu_host_page_bits = 0;
261 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
262 qemu_host_page_bits++;
263 qemu_host_page_mask = ~(qemu_host_page_size - 1);
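/* e.g. for a 4096-byte host page (illustrative): qemu_host_page_bits
   becomes 12 and qemu_host_page_mask is 0xfffff000 on a 32-bit host,
   so 'addr & qemu_host_page_mask' yields the page-aligned base. */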
264 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
265 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
267 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
269 long long startaddr, endaddr;
274 last_brk = (unsigned long)sbrk(0);
275 f = fopen("/proc/self/maps", "r");
278 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
280 startaddr = MIN(startaddr,
281 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282 endaddr = MIN(endaddr,
283 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
284 page_set_flags(startaddr & TARGET_PAGE_MASK,
285 TARGET_PAGE_ALIGN(endaddr),
296 static inline PageDesc **page_l1_map(target_ulong index)
298 #if TARGET_LONG_BITS > 32
299 /* Host memory outside guest VM. For 32-bit targets we have already
300 excluded high addresses. */
301 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
304 return &l1_map[index >> L2_BITS];
307 static inline PageDesc *page_find_alloc(target_ulong index)
310 lp = page_l1_map(index);
316 /* allocate if not found */
317 #if defined(CONFIG_USER_ONLY)
318 size_t len = sizeof(PageDesc) * L2_SIZE;
319 /* Don't use qemu_malloc because it may recurse. */
320 p = mmap(0, len, PROT_READ | PROT_WRITE,
321 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
324 unsigned long addr = h2g(p);
325 page_set_flags(addr & TARGET_PAGE_MASK,
326 TARGET_PAGE_ALIGN(addr + len),
330 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
334 return p + (index & (L2_SIZE - 1));
337 static inline PageDesc *page_find(target_ulong index)
340 lp = page_l1_map(index);
347 return p + (index & (L2_SIZE - 1));
350 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
355 p = (void **)l1_phys_map;
356 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
358 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
359 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
361 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
364 /* allocate if not found */
367 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
368 memset(p, 0, sizeof(void *) * L1_SIZE);
372 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
376 /* allocate if not found */
379 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
381 for (i = 0; i < L2_SIZE; i++) {
382 pd[i].phys_offset = IO_MEM_UNASSIGNED;
383 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
386 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
389 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
391 return phys_page_find_alloc(index, 0);
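/* Typical lookup pattern (sketch, mirroring breakpoint_invalidate()
   and tlb_set_page_exec() below):

     PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

   a NULL result means nothing was registered for that physical page. */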
394 #if !defined(CONFIG_USER_ONLY)
395 static void tlb_protect_code(ram_addr_t ram_addr);
396 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
398 #define mmap_lock() do { } while(0)
399 #define mmap_unlock() do { } while(0)
402 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
404 #if defined(CONFIG_USER_ONLY)
405 /* Currently it is not recommended to allocate big chunks of data in
406 user mode. This will change when a dedicated libc is used. */
407 #define USE_STATIC_CODE_GEN_BUFFER
410 #ifdef USE_STATIC_CODE_GEN_BUFFER
411 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
414 static void code_gen_alloc(unsigned long tb_size)
416 #ifdef USE_STATIC_CODE_GEN_BUFFER
417 code_gen_buffer = static_code_gen_buffer;
418 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
419 map_exec(code_gen_buffer, code_gen_buffer_size);
421 code_gen_buffer_size = tb_size;
422 if (code_gen_buffer_size == 0) {
423 #if defined(CONFIG_USER_ONLY)
424 /* in user mode, phys_ram_size is not meaningful */
425 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
427 /* XXX: needs adjustments */
428 code_gen_buffer_size = (unsigned long)(ram_size / 4);
431 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
432 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
433 /* The code gen buffer location may have constraints depending on
434 the host cpu and OS */
435 #if defined(__linux__)
440 flags = MAP_PRIVATE | MAP_ANONYMOUS;
441 #if defined(__x86_64__)
443 /* Cannot map more than that */
444 if (code_gen_buffer_size > (800 * 1024 * 1024))
445 code_gen_buffer_size = (800 * 1024 * 1024);
446 #elif defined(__sparc_v9__)
447 // Map the buffer below 2G, so we can use direct calls and branches
449 start = (void *) 0x60000000UL;
450 if (code_gen_buffer_size > (512 * 1024 * 1024))
451 code_gen_buffer_size = (512 * 1024 * 1024);
452 #elif defined(__arm__)
453 /* Map the buffer below 32M, so we can use direct calls and branches */
455 start = (void *) 0x01000000UL;
456 if (code_gen_buffer_size > 16 * 1024 * 1024)
457 code_gen_buffer_size = 16 * 1024 * 1024;
459 code_gen_buffer = mmap(start, code_gen_buffer_size,
460 PROT_WRITE | PROT_READ | PROT_EXEC,
462 if (code_gen_buffer == MAP_FAILED) {
463 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467 #elif defined(__FreeBSD__) || defined(__DragonFly__)
471 flags = MAP_PRIVATE | MAP_ANONYMOUS;
472 #if defined(__x86_64__)
473 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
474 * 0x40000000 is free */
476 addr = (void *)0x40000000;
477 /* Cannot map more than that */
478 if (code_gen_buffer_size > (800 * 1024 * 1024))
479 code_gen_buffer_size = (800 * 1024 * 1024);
481 code_gen_buffer = mmap(addr, code_gen_buffer_size,
482 PROT_WRITE | PROT_READ | PROT_EXEC,
484 if (code_gen_buffer == MAP_FAILED) {
485 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
490 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
491 map_exec(code_gen_buffer, code_gen_buffer_size);
493 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
494 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
495 code_gen_buffer_max_size = code_gen_buffer_size -
496 code_gen_max_block_size();
497 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
498 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
501 /* Must be called before using the QEMU cpus. 'tb_size' is the size
502 (in bytes) allocated to the translation buffer. Zero means default size. */
504 void cpu_exec_init_all(unsigned long tb_size)
507 code_gen_alloc(tb_size);
508 code_gen_ptr = code_gen_buffer;
510 #if !defined(CONFIG_USER_ONLY)
515 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
517 #define CPU_COMMON_SAVE_VERSION 1
519 static void cpu_common_save(QEMUFile *f, void *opaque)
521 CPUState *env = opaque;
523 qemu_put_be32s(f, &env->halted);
524 qemu_put_be32s(f, &env->interrupt_request);
527 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
529 CPUState *env = opaque;
531 if (version_id != CPU_COMMON_SAVE_VERSION)
534 qemu_get_be32s(f, &env->halted);
535 qemu_get_be32s(f, &env->interrupt_request);
536 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
537 version_id is increased. */
538 env->interrupt_request &= ~0x01;
545 void cpu_exec_init(CPUState *env)
550 #if defined(CONFIG_USER_ONLY)
553 env->next_cpu = NULL;
556 while (*penv != NULL) {
557 penv = (CPUState **)&(*penv)->next_cpu;
560 env->cpu_index = cpu_index;
562 TAILQ_INIT(&env->breakpoints);
563 TAILQ_INIT(&env->watchpoints);
565 #if defined(CONFIG_USER_ONLY)
568 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
569 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
570 cpu_common_save, cpu_common_load, env);
571 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
572 cpu_save, cpu_load, env);
576 static inline void invalidate_page_bitmap(PageDesc *p)
578 if (p->code_bitmap) {
579 qemu_free(p->code_bitmap);
580 p->code_bitmap = NULL;
582 p->code_write_count = 0;
585 /* set to NULL all the 'first_tb' fields in all PageDescs */
586 static void page_flush_tb(void)
591 for(i = 0; i < L1_SIZE; i++) {
594 for(j = 0; j < L2_SIZE; j++) {
596 invalidate_page_bitmap(p);
603 /* flush all the translation blocks */
604 /* XXX: tb_flush is currently not thread safe */
605 void tb_flush(CPUState *env1)
608 #if defined(DEBUG_FLUSH)
609 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
610 (unsigned long)(code_gen_ptr - code_gen_buffer),
612 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
614 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
615 cpu_abort(env1, "Internal error: code buffer overflow\n");
619 for(env = first_cpu; env != NULL; env = env->next_cpu) {
620 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
623 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
626 code_gen_ptr = code_gen_buffer;
627 /* XXX: flush processor icache at this point if cache flush is expensive */
632 #ifdef DEBUG_TB_CHECK
634 static void tb_invalidate_check(target_ulong address)
636 TranslationBlock *tb;
638 address &= TARGET_PAGE_MASK;
639 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
640 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
641 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
642 address >= tb->pc + tb->size)) {
643 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
644 address, (long)tb->pc, tb->size);
650 /* verify that all the pages have correct rights for code */
651 static void tb_page_check(void)
653 TranslationBlock *tb;
654 int i, flags1, flags2;
656 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
657 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
658 flags1 = page_get_flags(tb->pc);
659 flags2 = page_get_flags(tb->pc + tb->size - 1);
660 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
661 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
662 (long)tb->pc, tb->size, flags1, flags2);
668 static void tb_jmp_check(TranslationBlock *tb)
670 TranslationBlock *tb1;
673 /* suppress any remaining jumps to this TB */
677 tb1 = (TranslationBlock *)((long)tb1 & ~3);
680 tb1 = tb1->jmp_next[n1];
682 /* check end of list */
684 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
690 /* invalidate one TB */
691 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
694 TranslationBlock *tb1;
698 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
701 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
705 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
707 TranslationBlock *tb1;
713 tb1 = (TranslationBlock *)((long)tb1 & ~3);
715 *ptb = tb1->page_next[n1];
718 ptb = &tb1->page_next[n1];
722 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
724 TranslationBlock *tb1, **ptb;
727 ptb = &tb->jmp_next[n];
730 /* find tb(n) in circular list */
734 tb1 = (TranslationBlock *)((long)tb1 & ~3);
735 if (n1 == n && tb1 == tb)
738 ptb = &tb1->jmp_first;
740 ptb = &tb1->jmp_next[n1];
743 /* now we can suppress tb(n) from the list */
744 *ptb = tb->jmp_next[n];
746 tb->jmp_next[n] = NULL;
750 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
752 static inline void tb_reset_jump(TranslationBlock *tb, int n)
754 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
757 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
762 target_phys_addr_t phys_pc;
763 TranslationBlock *tb1, *tb2;
765 /* remove the TB from the hash list */
766 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
767 h = tb_phys_hash_func(phys_pc);
768 tb_remove(&tb_phys_hash[h], tb,
769 offsetof(TranslationBlock, phys_hash_next));
771 /* remove the TB from the page list */
772 if (tb->page_addr[0] != page_addr) {
773 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
774 tb_page_remove(&p->first_tb, tb);
775 invalidate_page_bitmap(p);
777 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
778 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
779 tb_page_remove(&p->first_tb, tb);
780 invalidate_page_bitmap(p);
783 tb_invalidated_flag = 1;
785 /* remove the TB from each CPU's jump cache */
786 h = tb_jmp_cache_hash_func(tb->pc);
787 for(env = first_cpu; env != NULL; env = env->next_cpu) {
788 if (env->tb_jmp_cache[h] == tb)
789 env->tb_jmp_cache[h] = NULL;
792 /* suppress this TB from the two jump lists */
793 tb_jmp_remove(tb, 0);
794 tb_jmp_remove(tb, 1);
796 /* suppress any remaining jumps to this TB */
802 tb1 = (TranslationBlock *)((long)tb1 & ~3);
803 tb2 = tb1->jmp_next[n1];
804 tb_reset_jump(tb1, n1);
805 tb1->jmp_next[n1] = NULL;
808 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
810 tb_phys_invalidate_count++;
813 static inline void set_bits(uint8_t *tab, int start, int len)
819 mask = 0xff << (start & 7);
820 if ((start & ~7) == (end & ~7)) {
822 mask &= ~(0xff << (end & 7));
827 start = (start + 8) & ~7;
829 while (start < end1) {
834 mask = ~(0xff << (end & 7));
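/* Worked example: set_bits(tab, 3, 7) marks bits 3..9 (end == 10).
   start and end fall in different bytes, so:
     tab[0] |= 0xff << 3;             bits 3..7, mask 0xf8
     tab[1] |= ~(0xff << (10 & 7));   bits 8..9, mask 0x03
   (values illustrative only) */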
840 static void build_page_bitmap(PageDesc *p)
842 int n, tb_start, tb_end;
843 TranslationBlock *tb;
845 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
850 tb = (TranslationBlock *)((long)tb & ~3);
851 /* NOTE: this is subtle as a TB may span two physical pages */
853 /* NOTE: tb_end may be after the end of the page, but
854 it is not a problem */
855 tb_start = tb->pc & ~TARGET_PAGE_MASK;
856 tb_end = tb_start + tb->size;
857 if (tb_end > TARGET_PAGE_SIZE)
858 tb_end = TARGET_PAGE_SIZE;
861 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
863 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
864 tb = tb->page_next[n];
868 TranslationBlock *tb_gen_code(CPUState *env,
869 target_ulong pc, target_ulong cs_base,
870 int flags, int cflags)
872 TranslationBlock *tb;
874 target_ulong phys_pc, phys_page2, virt_page2;
877 phys_pc = get_phys_addr_code(env, pc);
880 /* flush must be done */
882 /* cannot fail at this point */
884 /* Don't forget to invalidate previous TB info. */
885 tb_invalidated_flag = 1;
887 tc_ptr = code_gen_ptr;
889 tb->cs_base = cs_base;
892 cpu_gen_code(env, tb, &code_gen_size);
893 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
895 /* check next page if needed */
896 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
898 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
899 phys_page2 = get_phys_addr_code(env, virt_page2);
901 tb_link_phys(tb, phys_pc, phys_page2);
905 /* invalidate all TBs which intersect with the target physical page
906 starting in range [start, end). NOTE: start and end must refer to
907 the same physical page. 'is_cpu_write_access' should be true if called
908 from a real cpu write access: the virtual CPU will exit the current
909 TB if code is modified inside this TB. */
910 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
911 int is_cpu_write_access)
913 TranslationBlock *tb, *tb_next, *saved_tb;
914 CPUState *env = cpu_single_env;
915 target_ulong tb_start, tb_end;
918 #ifdef TARGET_HAS_PRECISE_SMC
919 int current_tb_not_found = is_cpu_write_access;
920 TranslationBlock *current_tb = NULL;
921 int current_tb_modified = 0;
922 target_ulong current_pc = 0;
923 target_ulong current_cs_base = 0;
924 int current_flags = 0;
925 #endif /* TARGET_HAS_PRECISE_SMC */
927 p = page_find(start >> TARGET_PAGE_BITS);
930 if (!p->code_bitmap &&
931 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
932 is_cpu_write_access) {
933 /* build code bitmap */
934 build_page_bitmap(p);
937 /* we remove all the TBs in the range [start, end) */
938 /* XXX: see if in some cases it could be faster to invalidate all the code */
942 tb = (TranslationBlock *)((long)tb & ~3);
943 tb_next = tb->page_next[n];
944 /* NOTE: this is subtle as a TB may span two physical pages */
946 /* NOTE: tb_end may be after the end of the page, but
947 it is not a problem */
948 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
949 tb_end = tb_start + tb->size;
951 tb_start = tb->page_addr[1];
952 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
954 if (!(tb_end <= start || tb_start >= end)) {
955 #ifdef TARGET_HAS_PRECISE_SMC
956 if (current_tb_not_found) {
957 current_tb_not_found = 0;
959 if (env->mem_io_pc) {
960 /* now we have a real cpu fault */
961 current_tb = tb_find_pc(env->mem_io_pc);
964 if (current_tb == tb &&
965 (current_tb->cflags & CF_COUNT_MASK) != 1) {
966 /* If we are modifying the current TB, we must stop
967 its execution. We could be more precise by checking
968 that the modification is after the current PC, but it
969 would require a specialized function to partially
970 restore the CPU state */
972 current_tb_modified = 1;
973 cpu_restore_state(current_tb, env,
974 env->mem_io_pc, NULL);
975 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
978 #endif /* TARGET_HAS_PRECISE_SMC */
979 /* we need to do that to handle the case where a signal
980 occurs while doing tb_phys_invalidate() */
983 saved_tb = env->current_tb;
984 env->current_tb = NULL;
986 tb_phys_invalidate(tb, -1);
988 env->current_tb = saved_tb;
989 if (env->interrupt_request && env->current_tb)
990 cpu_interrupt(env, env->interrupt_request);
995 #if !defined(CONFIG_USER_ONLY)
996 /* if no code remaining, no need to continue to use slow writes */
998 invalidate_page_bitmap(p);
999 if (is_cpu_write_access) {
1000 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1004 #ifdef TARGET_HAS_PRECISE_SMC
1005 if (current_tb_modified) {
1006 /* we generate a block containing just the instruction
1007 modifying the memory. It will ensure that it cannot modify itself. */
1009 env->current_tb = NULL;
1010 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1011 cpu_resume_from_signal(env, NULL);
1016 /* len must be <= 8 and start must be a multiple of len */
1017 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1023 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1024 cpu_single_env->mem_io_vaddr, len,
1025 cpu_single_env->eip,
1026 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1029 p = page_find(start >> TARGET_PAGE_BITS);
1032 if (p->code_bitmap) {
1033 offset = start & ~TARGET_PAGE_MASK;
1034 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1035 if (b & ((1 << len) - 1))
1039 tb_invalidate_phys_page_range(start, start + len, 1);
1043 #if !defined(CONFIG_SOFTMMU)
1044 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1045 unsigned long pc, void *puc)
1047 TranslationBlock *tb;
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 TranslationBlock *current_tb = NULL;
1052 CPUState *env = cpu_single_env;
1053 int current_tb_modified = 0;
1054 target_ulong current_pc = 0;
1055 target_ulong current_cs_base = 0;
1056 int current_flags = 0;
1059 addr &= TARGET_PAGE_MASK;
1060 p = page_find(addr >> TARGET_PAGE_BITS);
1064 #ifdef TARGET_HAS_PRECISE_SMC
1065 if (tb && pc != 0) {
1066 current_tb = tb_find_pc(pc);
1069 while (tb != NULL) {
1071 tb = (TranslationBlock *)((long)tb & ~3);
1072 #ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb == tb &&
1074 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1075 /* If we are modifying the current TB, we must stop
1076 its execution. We could be more precise by checking
1077 that the modification is after the current PC, but it
1078 would require a specialized function to partially
1079 restore the CPU state */
1081 current_tb_modified = 1;
1082 cpu_restore_state(current_tb, env, pc, puc);
1083 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1086 #endif /* TARGET_HAS_PRECISE_SMC */
1087 tb_phys_invalidate(tb, addr);
1088 tb = tb->page_next[n];
1091 #ifdef TARGET_HAS_PRECISE_SMC
1092 if (current_tb_modified) {
1093 /* we generate a block containing just the instruction
1094 modifying the memory. It will ensure that it cannot modify itself. */
1096 env->current_tb = NULL;
1097 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1098 cpu_resume_from_signal(env, puc);
1104 /* add the tb in the target page and protect it if necessary */
1105 static inline void tb_alloc_page(TranslationBlock *tb,
1106 unsigned int n, target_ulong page_addr)
1109 TranslationBlock *last_first_tb;
1111 tb->page_addr[n] = page_addr;
1112 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1113 tb->page_next[n] = p->first_tb;
1114 last_first_tb = p->first_tb;
1115 p->first_tb = (TranslationBlock *)((long)tb | n);
1116 invalidate_page_bitmap(p);
1118 #if defined(TARGET_HAS_SMC) || 1
1120 #if defined(CONFIG_USER_ONLY)
1121 if (p->flags & PAGE_WRITE) {
1126 /* force the host page as non-writable (writes will have a
1127 page fault + mprotect overhead) */
1128 page_addr &= qemu_host_page_mask;
1130 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1131 addr += TARGET_PAGE_SIZE) {
1133 p2 = page_find (addr >> TARGET_PAGE_BITS);
1137 p2->flags &= ~PAGE_WRITE;
1138 page_get_flags(addr);
1140 mprotect(g2h(page_addr), qemu_host_page_size,
1141 (prot & PAGE_BITS) & ~PAGE_WRITE);
1142 #ifdef DEBUG_TB_INVALIDATE
1143 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1148 /* if some code is already present, then the pages are already
1149 protected. So we handle the case where only the first TB is
1150 allocated in a physical page */
1151 if (!last_first_tb) {
1152 tlb_protect_code(page_addr);
1156 #endif /* TARGET_HAS_SMC */
1159 /* Allocate a new translation block. Flush the translation buffer if
1160 too many translation blocks or too much generated code. */
1161 TranslationBlock *tb_alloc(target_ulong pc)
1163 TranslationBlock *tb;
1165 if (nb_tbs >= code_gen_max_blocks ||
1166 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1168 tb = &tbs[nb_tbs++];
1174 void tb_free(TranslationBlock *tb)
1176 /* In practice this is mostly used for single-use temporary TBs.
1177 Ignore the hard cases and just back up if this TB happens to
1178 be the last one generated. */
1179 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1180 code_gen_ptr = tb->tc_ptr;
1185 /* add a new TB and link it to the physical page tables. phys_page2 is
1186 (-1) to indicate that only one page contains the TB. */
1187 void tb_link_phys(TranslationBlock *tb,
1188 target_ulong phys_pc, target_ulong phys_page2)
1191 TranslationBlock **ptb;
1193 /* Grab the mmap lock to stop another thread invalidating this TB
1194 before we are done. */
1196 /* add in the physical hash table */
1197 h = tb_phys_hash_func(phys_pc);
1198 ptb = &tb_phys_hash[h];
1199 tb->phys_hash_next = *ptb;
1202 /* add in the page list */
1203 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1204 if (phys_page2 != -1)
1205 tb_alloc_page(tb, 1, phys_page2);
1207 tb->page_addr[1] = -1;
1209 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1210 tb->jmp_next[0] = NULL;
1211 tb->jmp_next[1] = NULL;
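/* The jmp_first/jmp_next lists tag each pointer with the jump slot in
   its two low bits: a link to slot n of tb1 is stored as
   (long)tb1 | n with n in {0, 1}, and (long)tb | 2 above marks the
   end of the circular list. List walkers decode accordingly:

     n1  = (long)ptr & 3;
     tb1 = (TranslationBlock *)((long)ptr & ~3);
*/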
1213 /* init original jump addresses */
1214 if (tb->tb_next_offset[0] != 0xffff)
1215 tb_reset_jump(tb, 0);
1216 if (tb->tb_next_offset[1] != 0xffff)
1217 tb_reset_jump(tb, 1);
1219 #ifdef DEBUG_TB_CHECK
1225 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1226 tb[1].tc_ptr. Return NULL if not found */
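/* Usage sketch (host_pc and puc are illustrative names): map a host
   PC taken from a fault back to its TB so the guest CPU state can be
   rebuilt, as the SMC handling above does:

     TranslationBlock *tb = tb_find_pc(host_pc);
     if (tb)
         cpu_restore_state(tb, env, host_pc, puc);
*/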
1227 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1229 int m_min, m_max, m;
1231 TranslationBlock *tb;
1235 if (tc_ptr < (unsigned long)code_gen_buffer ||
1236 tc_ptr >= (unsigned long)code_gen_ptr)
1238 /* binary search (cf Knuth) */
1241 while (m_min <= m_max) {
1242 m = (m_min + m_max) >> 1;
1244 v = (unsigned long)tb->tc_ptr;
1247 else if (tc_ptr < v) {
1256 static void tb_reset_jump_recursive(TranslationBlock *tb);
1258 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1260 TranslationBlock *tb1, *tb_next, **ptb;
1263 tb1 = tb->jmp_next[n];
1265 /* find head of list */
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1271 tb1 = tb1->jmp_next[n1];
1273 /* we are now sure that tb jumps to tb1 */
1276 /* remove tb from the jmp_first list */
1277 ptb = &tb_next->jmp_first;
1281 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1282 if (n1 == n && tb1 == tb)
1284 ptb = &tb1->jmp_next[n1];
1286 *ptb = tb->jmp_next[n];
1287 tb->jmp_next[n] = NULL;
1289 /* suppress the jump to next tb in generated code */
1290 tb_reset_jump(tb, n);
1292 /* suppress jumps in the tb on which we could have jumped */
1293 tb_reset_jump_recursive(tb_next);
1297 static void tb_reset_jump_recursive(TranslationBlock *tb)
1299 tb_reset_jump_recursive2(tb, 0);
1300 tb_reset_jump_recursive2(tb, 1);
1303 #if defined(TARGET_HAS_ICE)
1304 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1306 target_phys_addr_t addr;
1308 ram_addr_t ram_addr;
1311 addr = cpu_get_phys_page_debug(env, pc);
1312 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1314 pd = IO_MEM_UNASSIGNED;
1316 pd = p->phys_offset;
1318 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1319 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1323 /* Add a watchpoint. */
1324 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1325 int flags, CPUWatchpoint **watchpoint)
1327 target_ulong len_mask = ~(len - 1);
1330 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1331 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1332 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1333 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1336 wp = qemu_malloc(sizeof(*wp));
1339 wp->len_mask = len_mask;
1342 /* keep all GDB-injected watchpoints in front */
1344 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1346 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1348 tlb_flush_page(env, addr);
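/* Usage sketch (flags illustrative): watch 4 bytes at 'addr' for
   guest writes on behalf of the gdb stub:

     CPUWatchpoint *wp;
     cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);

   the tlb_flush_page() above ensures the next access refills the TLB
   and is routed through the watchpoint path in tlb_set_page_exec(). */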
1355 /* Remove a specific watchpoint. */
1356 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1359 target_ulong len_mask = ~(len - 1);
1362 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1363 if (addr == wp->vaddr && len_mask == wp->len_mask
1364 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1365 cpu_watchpoint_remove_by_ref(env, wp);
1372 /* Remove a specific watchpoint by reference. */
1373 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1375 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1377 tlb_flush_page(env, watchpoint->vaddr);
1379 qemu_free(watchpoint);
1382 /* Remove all matching watchpoints. */
1383 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1385 CPUWatchpoint *wp, *next;
1387 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1388 if (wp->flags & mask)
1389 cpu_watchpoint_remove_by_ref(env, wp);
1393 /* Add a breakpoint. */
1394 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1395 CPUBreakpoint **breakpoint)
1397 #if defined(TARGET_HAS_ICE)
1400 bp = qemu_malloc(sizeof(*bp));
1405 /* keep all GDB-injected breakpoints in front */
1407 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1409 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1411 breakpoint_invalidate(env, pc);
1421 /* Remove a specific breakpoint. */
1422 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1424 #if defined(TARGET_HAS_ICE)
1427 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1428 if (bp->pc == pc && bp->flags == flags) {
1429 cpu_breakpoint_remove_by_ref(env, bp);
1439 /* Remove a specific breakpoint by reference. */
1440 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1442 #if defined(TARGET_HAS_ICE)
1443 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1445 breakpoint_invalidate(env, breakpoint->pc);
1447 qemu_free(breakpoint);
1451 /* Remove all matching breakpoints. */
1452 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1454 #if defined(TARGET_HAS_ICE)
1455 CPUBreakpoint *bp, *next;
1457 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1458 if (bp->flags & mask)
1459 cpu_breakpoint_remove_by_ref(env, bp);
1464 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1465 CPU loop after each instruction */
1466 void cpu_single_step(CPUState *env, int enabled)
1468 #if defined(TARGET_HAS_ICE)
1469 if (env->singlestep_enabled != enabled) {
1470 env->singlestep_enabled = enabled;
1472 kvm_update_guest_debug(env, 0);
1474 /* must flush all the translated code to avoid inconsistencies */
1475 /* XXX: only flush what is necessary */
1482 /* enable or disable low-level logging */
1483 void cpu_set_log(int log_flags)
1485 loglevel = log_flags;
1486 if (loglevel && !logfile) {
1487 logfile = fopen(logfilename, log_append ? "a" : "w");
1489 perror(logfilename);
1492 #if !defined(CONFIG_SOFTMMU)
1493 /* must avoid glibc's mmap() usage by setting the buffer "by hand" */
1495 static char logfile_buf[4096];
1496 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1499 setvbuf(logfile, NULL, _IOLBF, 0);
1503 if (!loglevel && logfile) {
1509 void cpu_set_log_filename(const char *filename)
1511 logfilename = strdup(filename);
1516 cpu_set_log(loglevel);
1519 static void cpu_unlink_tb(CPUState *env)
1521 #if defined(USE_NPTL)
1522 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1523 problem and hope the cpu will stop of its own accord. For userspace
1524 emulation this often isn't actually as bad as it sounds. Often
1525 signals are used primarily to interrupt blocking syscalls. */
1527 TranslationBlock *tb;
1528 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1530 tb = env->current_tb;
1531 /* if the cpu is currently executing code, we must unlink it and
1532 all the potentially executing TBs */
1533 if (tb && !testandset(&interrupt_lock)) {
1534 env->current_tb = NULL;
1535 tb_reset_jump_recursive(tb);
1536 resetlock(&interrupt_lock);
1541 /* mask must never be zero, except for A20 change call */
1542 void cpu_interrupt(CPUState *env, int mask)
1546 old_mask = env->interrupt_request;
1547 env->interrupt_request |= mask;
1549 #ifndef CONFIG_USER_ONLY
1551 * If called from iothread context, wake the target cpu in case it is halted. */
1554 if (!qemu_cpu_self(env)) {
1561 env->icount_decr.u16.high = 0xffff;
1562 #ifndef CONFIG_USER_ONLY
1564 && (mask & ~old_mask) != 0) {
1565 cpu_abort(env, "Raised interrupt while not in I/O function");
1573 void cpu_reset_interrupt(CPUState *env, int mask)
1575 env->interrupt_request &= ~mask;
1578 void cpu_exit(CPUState *env)
1580 env->exit_request = 1;
1584 const CPULogItem cpu_log_items[] = {
1585 { CPU_LOG_TB_OUT_ASM, "out_asm",
1586 "show generated host assembly code for each compiled TB" },
1587 { CPU_LOG_TB_IN_ASM, "in_asm",
1588 "show target assembly code for each compiled TB" },
1589 { CPU_LOG_TB_OP, "op",
1590 "show micro ops for each compiled TB" },
1591 { CPU_LOG_TB_OP_OPT, "op_opt",
1594 "before eflags optimization and "
1596 "after liveness analysis" },
1597 { CPU_LOG_INT, "int",
1598 "show interrupts/exceptions in short format" },
1599 { CPU_LOG_EXEC, "exec",
1600 "show trace before each executed TB (lots of logs)" },
1601 { CPU_LOG_TB_CPU, "cpu",
1602 "show CPU state before block translation" },
1604 { CPU_LOG_PCALL, "pcall",
1605 "show protected mode far calls/returns/exceptions" },
1606 { CPU_LOG_RESET, "cpu_reset",
1607 "show CPU state before CPU resets" },
1610 { CPU_LOG_IOPORT, "ioport",
1611 "show all i/o ports accesses" },
1616 static int cmp1(const char *s1, int n, const char *s2)
1618 if (strlen(s2) != n)
1620 return memcmp(s1, s2, n) == 0;
1623 /* takes a comma separated list of log masks. Return 0 if error. */
1624 int cpu_str_to_log_mask(const char *str)
1626 const CPULogItem *item;
1633 p1 = strchr(p, ',');
1636 if(cmp1(p,p1-p,"all")) {
1637 for(item = cpu_log_items; item->mask != 0; item++) {
1641 for(item = cpu_log_items; item->mask != 0; item++) {
1642 if (cmp1(p, p1 - p, item->name))
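/* Usage sketch: cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU (0 on a parse error), and "all"
   sets every mask in cpu_log_items[]; the result typically feeds
   cpu_set_log():

     int mask = cpu_str_to_log_mask("in_asm,cpu");
     if (mask)
         cpu_set_log(mask);
*/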
1656 void cpu_abort(CPUState *env, const char *fmt, ...)
1663 fprintf(stderr, "qemu: fatal: ");
1664 vfprintf(stderr, fmt, ap);
1665 fprintf(stderr, "\n");
1667 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1669 cpu_dump_state(env, stderr, fprintf, 0);
1671 if (qemu_log_enabled()) {
1672 qemu_log("qemu: fatal: ");
1673 qemu_log_vprintf(fmt, ap2);
1676 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1678 log_cpu_state(env, 0);
1688 CPUState *cpu_copy(CPUState *env)
1690 CPUState *new_env = cpu_init(env->cpu_model_str);
1691 CPUState *next_cpu = new_env->next_cpu;
1692 int cpu_index = new_env->cpu_index;
1693 #if defined(TARGET_HAS_ICE)
1698 memcpy(new_env, env, sizeof(CPUState));
1700 /* Preserve chaining and index. */
1701 new_env->next_cpu = next_cpu;
1702 new_env->cpu_index = cpu_index;
1704 /* Clone all break/watchpoints.
1705 Note: Once we support ptrace with hw-debug register access, make sure
1706 BP_CPU break/watchpoints are handled correctly on clone. */
1707 TAILQ_INIT(&env->breakpoints);
1708 TAILQ_INIT(&env->watchpoints);
1709 #if defined(TARGET_HAS_ICE)
1710 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1711 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1713 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1714 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1722 #if !defined(CONFIG_USER_ONLY)
1724 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1728 /* Discard jump cache entries for any tb which might potentially
1729 overlap the flushed page. */
1730 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1731 memset (&env->tb_jmp_cache[i], 0,
1732 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1734 i = tb_jmp_cache_hash_page(addr);
1735 memset (&env->tb_jmp_cache[i], 0,
1736 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1739 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1741 void tlb_flush(CPUState *env, int flush_global)
1745 #if defined(DEBUG_TLB)
1746 printf("tlb_flush:\n");
1748 /* must reset current TB so that interrupts cannot modify the
1749 links while we are modifying them */
1750 env->current_tb = NULL;
1752 for(i = 0; i < CPU_TLB_SIZE; i++) {
1753 env->tlb_table[0][i].addr_read = -1;
1754 env->tlb_table[0][i].addr_write = -1;
1755 env->tlb_table[0][i].addr_code = -1;
1756 env->tlb_table[1][i].addr_read = -1;
1757 env->tlb_table[1][i].addr_write = -1;
1758 env->tlb_table[1][i].addr_code = -1;
1759 #if (NB_MMU_MODES >= 3)
1760 env->tlb_table[2][i].addr_read = -1;
1761 env->tlb_table[2][i].addr_write = -1;
1762 env->tlb_table[2][i].addr_code = -1;
1764 #if (NB_MMU_MODES >= 4)
1765 env->tlb_table[3][i].addr_read = -1;
1766 env->tlb_table[3][i].addr_write = -1;
1767 env->tlb_table[3][i].addr_code = -1;
1769 #if (NB_MMU_MODES >= 5)
1770 env->tlb_table[4][i].addr_read = -1;
1771 env->tlb_table[4][i].addr_write = -1;
1772 env->tlb_table[4][i].addr_code = -1;
1777 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1780 if (env->kqemu_enabled) {
1781 kqemu_flush(env, flush_global);
1787 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1789 if (addr == (tlb_entry->addr_read &
1790 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1791 addr == (tlb_entry->addr_write &
1792 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1793 addr == (tlb_entry->addr_code &
1794 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1795 tlb_entry->addr_read = -1;
1796 tlb_entry->addr_write = -1;
1797 tlb_entry->addr_code = -1;
1801 void tlb_flush_page(CPUState *env, target_ulong addr)
1805 #if defined(DEBUG_TLB)
1806 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1808 /* must reset current TB so that interrupts cannot modify the
1809 links while we are modifying them */
1810 env->current_tb = NULL;
1812 addr &= TARGET_PAGE_MASK;
1813 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1814 tlb_flush_entry(&env->tlb_table[0][i], addr);
1815 tlb_flush_entry(&env->tlb_table[1][i], addr);
1816 #if (NB_MMU_MODES >= 3)
1817 tlb_flush_entry(&env->tlb_table[2][i], addr);
1819 #if (NB_MMU_MODES >= 4)
1820 tlb_flush_entry(&env->tlb_table[3][i], addr);
1822 #if (NB_MMU_MODES >= 5)
1823 tlb_flush_entry(&env->tlb_table[4][i], addr);
1826 tlb_flush_jmp_cache(env, addr);
1829 if (env->kqemu_enabled) {
1830 kqemu_flush_page(env, addr);
1835 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1837 static void tlb_protect_code(ram_addr_t ram_addr)
1839 cpu_physical_memory_reset_dirty(ram_addr,
1840 ram_addr + TARGET_PAGE_SIZE,
1844 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1845 tested for self-modifying code */
1846 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1849 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1852 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1853 unsigned long start, unsigned long length)
1856 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1857 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1858 if ((addr - start) < length) {
1859 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1864 /* Note: start and end must be within the same ram block. */
1865 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1869 unsigned long length, start1;
1873 start &= TARGET_PAGE_MASK;
1874 end = TARGET_PAGE_ALIGN(end);
1876 length = end - start;
1879 len = length >> TARGET_PAGE_BITS;
1881 /* XXX: should not depend on cpu context */
1883 if (env->kqemu_enabled) {
1886 for(i = 0; i < len; i++) {
1887 kqemu_set_notdirty(env, addr);
1888 addr += TARGET_PAGE_SIZE;
1892 mask = ~dirty_flags;
1893 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1894 for(i = 0; i < len; i++)
1897 /* we modify the TLB cache so that the dirty bit will be set again
1898 when accessing the range */
1899 start1 = (unsigned long)qemu_get_ram_ptr(start);
1900 /* Check that we don't span multiple blocks - this breaks the
1901 address comparisons below. */
1902 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1903 != (end - 1) - start) {
1907 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1910 for(i = 0; i < CPU_TLB_SIZE; i++)
1911 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1912 #if (NB_MMU_MODES >= 3)
1913 for(i = 0; i < CPU_TLB_SIZE; i++)
1914 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1916 #if (NB_MMU_MODES >= 4)
1917 for(i = 0; i < CPU_TLB_SIZE; i++)
1918 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1920 #if (NB_MMU_MODES >= 5)
1921 for(i = 0; i < CPU_TLB_SIZE; i++)
1922 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1927 int cpu_physical_memory_set_dirty_tracking(int enable)
1929 in_migration = enable;
1933 int cpu_physical_memory_get_dirty_tracking(void)
1935 return in_migration;
1938 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1941 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1944 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1946 ram_addr_t ram_addr;
1949 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1950 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1951 + tlb_entry->addend);
1952 ram_addr = qemu_ram_addr_from_host(p);
1953 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1954 tlb_entry->addr_write |= TLB_NOTDIRTY;
1959 /* update the TLB according to the current state of the dirty bits */
1960 void cpu_tlb_update_dirty(CPUState *env)
1963 for(i = 0; i < CPU_TLB_SIZE; i++)
1964 tlb_update_dirty(&env->tlb_table[0][i]);
1965 for(i = 0; i < CPU_TLB_SIZE; i++)
1966 tlb_update_dirty(&env->tlb_table[1][i]);
1967 #if (NB_MMU_MODES >= 3)
1968 for(i = 0; i < CPU_TLB_SIZE; i++)
1969 tlb_update_dirty(&env->tlb_table[2][i]);
1971 #if (NB_MMU_MODES >= 4)
1972 for(i = 0; i < CPU_TLB_SIZE; i++)
1973 tlb_update_dirty(&env->tlb_table[3][i]);
1975 #if (NB_MMU_MODES >= 5)
1976 for(i = 0; i < CPU_TLB_SIZE; i++)
1977 tlb_update_dirty(&env->tlb_table[4][i]);
1981 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1983 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1984 tlb_entry->addr_write = vaddr;
1987 /* update the TLB corresponding to virtual page vaddr
1988 so that it is no longer dirty */
1989 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1993 vaddr &= TARGET_PAGE_MASK;
1994 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1995 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1996 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1997 #if (NB_MMU_MODES >= 3)
1998 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2000 #if (NB_MMU_MODES >= 4)
2001 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2003 #if (NB_MMU_MODES >= 5)
2004 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
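/* The TLB is direct-mapped on the virtual page number, as in the
   index computation above. Illustration assuming CPU_TLB_SIZE == 256
   and TARGET_PAGE_BITS == 12:

     vaddr == 0x08049123
     index == (0x08049123 >> 12) & 0xff == 0x49

   so every mmu_idx consults slot 0x49 for that page. */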
2008 /* add a new TLB entry. At most one entry for a given virtual address
2009 is permitted. Return 0 if OK or 2 if the page could not be mapped
2010 (can only happen in non-SOFTMMU mode for I/O pages or pages
2011 conflicting with the host address space). */
2012 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2013 target_phys_addr_t paddr, int prot,
2014 int mmu_idx, int is_softmmu)
2019 target_ulong address;
2020 target_ulong code_address;
2021 target_phys_addr_t addend;
2025 target_phys_addr_t iotlb;
2027 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2029 pd = IO_MEM_UNASSIGNED;
2031 pd = p->phys_offset;
2033 #if defined(DEBUG_TLB)
2034 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2035 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2040 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2041 /* IO memory case (romd handled later) */
2042 address |= TLB_MMIO;
2044 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2045 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2047 iotlb = pd & TARGET_PAGE_MASK;
2048 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2049 iotlb |= IO_MEM_NOTDIRTY;
2051 iotlb |= IO_MEM_ROM;
2053 /* IO handlers are currently passed a physical address.
2054 It would be nice to pass an offset from the base address
2055 of that region. This would avoid having to special case RAM,
2056 and avoid full address decoding in every device.
2057 We can't use the high bits of pd for this because
2058 IO_MEM_ROMD uses these as a ram address. */
2059 iotlb = (pd & ~TARGET_PAGE_MASK);
2061 iotlb += p->region_offset;
2067 code_address = address;
2068 /* Make accesses to pages with watchpoints go via the
2069 watchpoint trap routines. */
2070 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2071 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2072 iotlb = io_mem_watch + paddr;
2073 /* TODO: The memory case can be optimized by not trapping
2074 reads of pages with a write breakpoint. */
2075 address |= TLB_MMIO;
2079 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2080 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2081 te = &env->tlb_table[mmu_idx][index];
2082 te->addend = addend - vaddr;
2083 if (prot & PAGE_READ) {
2084 te->addr_read = address;
2089 if (prot & PAGE_EXEC) {
2090 te->addr_code = code_address;
2094 if (prot & PAGE_WRITE) {
2095 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2096 (pd & IO_MEM_ROMD)) {
2097 /* Write access calls the I/O callback. */
2098 te->addr_write = address | TLB_MMIO;
2099 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2100 !cpu_physical_memory_is_dirty(pd)) {
2101 te->addr_write = address | TLB_NOTDIRTY;
2103 te->addr_write = address;
2106 te->addr_write = -1;
2113 void tlb_flush(CPUState *env, int flush_global)
2117 void tlb_flush_page(CPUState *env, target_ulong addr)
2121 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2122 target_phys_addr_t paddr, int prot,
2123 int mmu_idx, int is_softmmu)
2129 * Walks guest process memory "regions" one by one
2130 * and calls callback function 'fn' for each region.
2132 int walk_memory_regions(void *priv,
2133 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2135 unsigned long start, end;
2137 int i, j, prot, prot1;
2143 for (i = 0; i <= L1_SIZE; i++) {
2144 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2145 for (j = 0; j < L2_SIZE; j++) {
2146 prot1 = (p == NULL) ? 0 : p[j].flags;
2148 * "region" is one continuous chunk of memory
2149 * that has same protection flags set.
2151 if (prot1 != prot) {
2152 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2154 rc = (*fn)(priv, start, end, prot);
2155 /* callback can stop iteration by returning != 0 */
2172 static int dump_region(void *priv, unsigned long start,
2173 unsigned long end, unsigned long prot)
2175 FILE *f = (FILE *)priv;
2177 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2178 start, end, end - start,
2179 ((prot & PAGE_READ) ? 'r' : '-'),
2180 ((prot & PAGE_WRITE) ? 'w' : '-'),
2181 ((prot & PAGE_EXEC) ? 'x' : '-'));
2186 /* dump memory mappings */
2187 void page_dump(FILE *f)
2189 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2190 "start", "end", "size", "prot");
2191 walk_memory_regions(f, dump_region);
2194 int page_get_flags(target_ulong address)
2198 p = page_find(address >> TARGET_PAGE_BITS);
2204 /* modify the flags of a page and invalidate the code if
2205 necessary. The flag PAGE_WRITE_ORG is set automatically
2206 depending on PAGE_WRITE */
2207 void page_set_flags(target_ulong start, target_ulong end, int flags)
2212 /* mmap_lock should already be held. */
2213 start = start & TARGET_PAGE_MASK;
2214 end = TARGET_PAGE_ALIGN(end);
2215 if (flags & PAGE_WRITE)
2216 flags |= PAGE_WRITE_ORG;
2217 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2218 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2219 /* We may be called for host regions that are outside guest address space. */
2223 /* if the write protection is set, then we invalidate the code inside */
2225 if (!(p->flags & PAGE_WRITE) &&
2226 (flags & PAGE_WRITE) &&
2228 tb_invalidate_phys_page(addr, 0, NULL);
2234 int page_check_range(target_ulong start, target_ulong len, int flags)
2240 if (start + len < start)
2241 /* we've wrapped around */
2244 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2245 start = start & TARGET_PAGE_MASK;
2247 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2248 p = page_find(addr >> TARGET_PAGE_BITS);
2251 if( !(p->flags & PAGE_VALID) )
2254 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2256 if (flags & PAGE_WRITE) {
2257 if (!(p->flags & PAGE_WRITE_ORG))
2259 /* unprotect the page if it was put read-only because it
2260 contains translated code */
2261 if (!(p->flags & PAGE_WRITE)) {
2262 if (!page_unprotect(addr, 0, NULL))
2271 /* called from signal handler: invalidate the code and unprotect the
2272 page. Return TRUE if the fault was successfully handled. */
2273 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2275 unsigned int page_index, prot, pindex;
2277 target_ulong host_start, host_end, addr;
2279 /* Technically this isn't safe inside a signal handler. However we
2280 know this only ever happens in a synchronous SEGV handler, so in
2281 practice it seems to be ok. */
2284 host_start = address & qemu_host_page_mask;
2285 page_index = host_start >> TARGET_PAGE_BITS;
2286 p1 = page_find(page_index);
2291 host_end = host_start + qemu_host_page_size;
2294 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2298 /* if the page was really writable, then we change its
2299 protection back to writable */
2300 if (prot & PAGE_WRITE_ORG) {
2301 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2302 if (!(p1[pindex].flags & PAGE_WRITE)) {
2303 mprotect((void *)g2h(host_start), qemu_host_page_size,
2304 (prot & PAGE_BITS) | PAGE_WRITE);
2305 p1[pindex].flags |= PAGE_WRITE;
2306 /* and since the content will be modified, we must invalidate
2307 the corresponding translated code. */
2308 tb_invalidate_phys_page(address, pc, puc);
2309 #ifdef DEBUG_TB_CHECK
2310 tb_invalidate_check(address);
2320 static inline void tlb_set_dirty(CPUState *env,
2321 unsigned long addr, target_ulong vaddr)
2324 #endif /* defined(CONFIG_USER_ONLY) */
2326 #if !defined(CONFIG_USER_ONLY)
2328 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2329 ram_addr_t memory, ram_addr_t region_offset);
2330 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2331 ram_addr_t orig_memory, ram_addr_t region_offset);
2332 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2335 if (addr > start_addr) \
2338 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2339 if (start_addr2 > 0) \
2343 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2344 end_addr2 = TARGET_PAGE_SIZE - 1; \
2346 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2347 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2352 /* register physical memory. 'size' must be a multiple of the target
2353 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2354 io memory page. The address used when calling the IO function is
2355 the offset from the start of the region, plus region_offset. Both
2356 start_addr and region_offset are rounded down to a page boundary
2357 before calculating this offset. This should not be a problem unless
2358 the low bits of start_addr and region_offset differ. */
2359 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2361 ram_addr_t phys_offset,
2362 ram_addr_t region_offset)
2364 target_phys_addr_t addr, end_addr;
2367 ram_addr_t orig_size = size;
2371 /* XXX: should not depend on cpu context */
2373 if (env->kqemu_enabled) {
2374 kqemu_set_phys_mem(start_addr, size, phys_offset);
2378 kvm_set_phys_mem(start_addr, size, phys_offset);
2380 if (phys_offset == IO_MEM_UNASSIGNED) {
2381 region_offset = start_addr;
2383 region_offset &= TARGET_PAGE_MASK;
2384 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2385 end_addr = start_addr + (target_phys_addr_t)size;
2386 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2387 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2388 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2389 ram_addr_t orig_memory = p->phys_offset;
2390 target_phys_addr_t start_addr2, end_addr2;
2391 int need_subpage = 0;
2393 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2395 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2396 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2397 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2398 &p->phys_offset, orig_memory,
2401 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2404 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2406 p->region_offset = 0;
2408 p->phys_offset = phys_offset;
2409 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2410 (phys_offset & IO_MEM_ROMD))
2411 phys_offset += TARGET_PAGE_SIZE;
2414 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2415 p->phys_offset = phys_offset;
2416 p->region_offset = region_offset;
2417 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2418 (phys_offset & IO_MEM_ROMD)) {
2419 phys_offset += TARGET_PAGE_SIZE;
2421 target_phys_addr_t start_addr2, end_addr2;
2422 int need_subpage = 0;
2424 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2425 end_addr2, need_subpage);
2427 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2428 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2429 &p->phys_offset, IO_MEM_UNASSIGNED,
2430 addr & TARGET_PAGE_MASK);
2431 subpage_register(subpage, start_addr2, end_addr2,
2432 phys_offset, region_offset);
2433 p->region_offset = 0;
2437 region_offset += TARGET_PAGE_SIZE;
2440 /* since each CPU stores ram addresses in its TLB cache, we must
2441 reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
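/* Illustrative sketch, not part of the original file: how a board model
   typically uses the registration function above (via the plain
   cpu_register_physical_memory() wrapper). The addresses, sizes and the
   example_* names are hypothetical; ram_offset would come from
   qemu_ram_alloc() and mmio_index from cpu_register_io_memory(). */
#if 0
static void example_board_map(ram_addr_t ram_offset, int mmio_index)
{
    /* RAM: the low bits of phys_offset are IO_MEM_RAM (0). */
    cpu_register_physical_memory(0x00000000, 0x10000,
                                 ram_offset | IO_MEM_RAM);
    /* MMIO: the low bits select the handler table entry registered earlier. */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif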
2448 /* XXX: temporary until new memory mapping API */
2449 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2453 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2455 return IO_MEM_UNASSIGNED;
2456 return p->phys_offset;
2459 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2462 kvm_coalesce_mmio_region(addr, size);
2465 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2468 kvm_uncoalesce_mmio_region(addr, size);
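/* Illustrative sketch, not part of the original file: a device with
   write-heavy, side-effect-free registers (e.g. a framebuffer) can use the
   two hooks above to ask KVM to batch its MMIO writes. The base address and
   the example_* name are hypothetical. */
#if 0
static void example_toggle_coalescing(target_phys_addr_t base, int enable)
{
    if (enable)
        qemu_register_coalesced_mmio(base, TARGET_PAGE_SIZE);
    else
        qemu_unregister_coalesced_mmio(base, TARGET_PAGE_SIZE);
}
#endif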
2472 /* XXX: better than nothing */
2473 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2476 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2477 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2478 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2481 addr = last_ram_offset;
2482 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2487 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2489 RAMBlock *new_block;
2492 if (kqemu_phys_ram_base) {
2493 return kqemu_ram_alloc(size);
2497 size = TARGET_PAGE_ALIGN(size);
2498 new_block = qemu_malloc(sizeof(*new_block));
2500 new_block->host = qemu_vmalloc(size);
2501 new_block->offset = last_ram_offset;
2502 new_block->length = size;
2504 new_block->next = ram_blocks;
2505 ram_blocks = new_block;
2507 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2508 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2509 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2510 0xff, size >> TARGET_PAGE_BITS);
2512 last_ram_offset += size;
2515 kvm_setup_guest_memory(new_block->host, size);
2517 return new_block->offset;
2520 void qemu_ram_free(ram_addr_t addr)
2522 /* TODO: implement this. */
2525 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2526 With the exception of the softmmu code in this file, this should
2527 only be used for local memory (e.g. video ram) that the device owns,
2528 and knows it isn't going to access beyond the end of the block.
2530 It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
2533 void *qemu_get_ram_ptr(ram_addr_t addr)
2540 if (kqemu_phys_ram_base) {
2541 return kqemu_phys_ram_base + addr;
2546 prevp = &ram_blocks;
2548 while (block && (block->offset > addr
2549 || block->offset + block->length <= addr)) {
2551 prevp = &prev->next;
2553 block = block->next;
2556 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    /* Move this entry to the start of the list. */
2561 prev->next = block->next;
2562 block->next = *prevp;
2565 return block->host + (addr - block->offset);
2568 /* Some of the softmmu routines need to translate from a host pointer
2569 (typically a TLB entry) back to a ram offset. */
2570 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2575 uint8_t *host = ptr;
2578 if (kqemu_phys_ram_base) {
2579 return host - kqemu_phys_ram_base;
2584 prevp = &ram_blocks;
2586 while (block && (block->host > host
2587 || block->host + block->length <= host)) {
2589 prevp = &prev->next;
2591 block = block->next;
2594 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2597 return block->offset + (host - block->host);
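/* Illustrative sketch, not part of the original file: the expected round
   trip between qemu_ram_alloc(), qemu_get_ram_ptr() and
   qemu_ram_addr_from_host() above. example_roundtrip is hypothetical. */
#if 0
static void example_roundtrip(void)
{
    ram_addr_t offset = qemu_ram_alloc(0x1000); /* register 4KB of guest ram */
    uint8_t *host = qemu_get_ram_ptr(offset);   /* host view of that ram */

    host[0] = 0x42;                             /* device-local access */
    /* translating the host pointer back must yield the original offset */
    if (qemu_ram_addr_from_host(host) != offset)
        abort();
}
#endif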
2600 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2602 #ifdef DEBUG_UNASSIGNED
2603 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2605 #if defined(TARGET_SPARC)
2606 do_unassigned_access(addr, 0, 0, 0, 1);
2611 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2613 #ifdef DEBUG_UNASSIGNED
2614 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2616 #if defined(TARGET_SPARC)
2617 do_unassigned_access(addr, 0, 0, 0, 2);
2622 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2624 #ifdef DEBUG_UNASSIGNED
2625 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2627 #if defined(TARGET_SPARC)
2628 do_unassigned_access(addr, 0, 0, 0, 4);
2633 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2635 #ifdef DEBUG_UNASSIGNED
2636 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2638 #if defined(TARGET_SPARC)
2639 do_unassigned_access(addr, 1, 0, 0, 1);
2643 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2645 #ifdef DEBUG_UNASSIGNED
2646 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2648 #if defined(TARGET_SPARC)
2649 do_unassigned_access(addr, 1, 0, 0, 2);
2653 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2655 #ifdef DEBUG_UNASSIGNED
2656 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2658 #if defined(TARGET_SPARC)
2659 do_unassigned_access(addr, 1, 0, 0, 4);
2663 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2664 unassigned_mem_readb,
2665 unassigned_mem_readw,
2666 unassigned_mem_readl,
2669 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2670 unassigned_mem_writeb,
2671 unassigned_mem_writew,
2672 unassigned_mem_writel,
2675 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2679 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2680 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2681 #if !defined(CONFIG_USER_ONLY)
2682 tb_invalidate_phys_page_fast(ram_addr, 1);
2683 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2686 stb_p(qemu_get_ram_ptr(ram_addr), val);
2688 if (cpu_single_env->kqemu_enabled &&
2689 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2690 kqemu_modify_page(cpu_single_env, ram_addr);
2692 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2693 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been flushed */
2696 if (dirty_flags == 0xff)
2697 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2700 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2704 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2705 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2706 #if !defined(CONFIG_USER_ONLY)
2707 tb_invalidate_phys_page_fast(ram_addr, 2);
2708 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2711 stw_p(qemu_get_ram_ptr(ram_addr), val);
2713 if (cpu_single_env->kqemu_enabled &&
2714 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2715 kqemu_modify_page(cpu_single_env, ram_addr);
2717 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2718 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been flushed */
2721 if (dirty_flags == 0xff)
2722 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2725 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2729 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2730 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2731 #if !defined(CONFIG_USER_ONLY)
2732 tb_invalidate_phys_page_fast(ram_addr, 4);
2733 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2736 stl_p(qemu_get_ram_ptr(ram_addr), val);
2738 if (cpu_single_env->kqemu_enabled &&
2739 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2740 kqemu_modify_page(cpu_single_env, ram_addr);
2742 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2743 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been flushed */
2746 if (dirty_flags == 0xff)
2747 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2750 static CPUReadMemoryFunc *error_mem_read[3] = {
2751 NULL, /* never used */
2752 NULL, /* never used */
2753 NULL, /* never used */
2756 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2757 notdirty_mem_writeb,
2758 notdirty_mem_writew,
    notdirty_mem_writel,
};
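/* Illustrative sketch, not part of the original file: how a consumer of the
   dirty bitmap maintained above (e.g. a display adapter) would use it.
   Assumes the cpu_physical_memory_*_dirty helpers and VGA_DIRTY_FLAG from
   cpu-all.h; example_scan_dirty is hypothetical. */
#if 0
static void example_scan_dirty(ram_addr_t vram_base, ram_addr_t vram_size)
{
    ram_addr_t a;

    for (a = vram_base; a < vram_base + vram_size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(vram_base, vram_base + vram_size,
                                    VGA_DIRTY_FLAG);
}
#endif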
2762 /* Generate a debug exception if a watchpoint has been hit. */
2763 static void check_watchpoint(int offset, int len_mask, int flags)
2765 CPUState *env = cpu_single_env;
2766 target_ulong pc, cs_base;
2767 TranslationBlock *tb;
2772 if (env->watchpoint_hit) {
2773 /* We re-entered the check after replacing the TB. Now raise
 * the debug interrupt so that it will trigger after the
2775 * current instruction. */
2776 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2779 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2780 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2781 if ((vaddr == (wp->vaddr & len_mask) ||
2782 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2783 wp->flags |= BP_WATCHPOINT_HIT;
2784 if (!env->watchpoint_hit) {
2785 env->watchpoint_hit = wp;
2786 tb = tb_find_pc(env->mem_io_pc);
2788 cpu_abort(env, "check_watchpoint: could not find TB for "
2789 "pc=%p", (void *)env->mem_io_pc);
2791 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2792 tb_phys_invalidate(tb, -1);
2793 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2794 env->exception_index = EXCP_DEBUG;
2796 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2797 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2799 cpu_resume_from_signal(env, NULL);
2802 wp->flags &= ~BP_WATCHPOINT_HIT;
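/* Illustrative sketch, not part of the original file: how a debugger front
   end arms the watchpoints checked above. Assumes cpu_watchpoint_insert()
   and cpu_watchpoint_remove_by_ref() with their usual signatures; the
   example_* name is hypothetical. */
#if 0
static void example_watch_write(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
        /* ... run the guest, handle EXCP_DEBUG when vaddr is written ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif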
2807 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit, then pass through to the normal out-of-line
   load/store functions. */
2810 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2812 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2813 return ldub_phys(addr);
2816 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2818 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2819 return lduw_phys(addr);
2822 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2824 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2825 return ldl_phys(addr);
2828 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2831 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2832 stb_phys(addr, val);
2835 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2838 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2839 stw_phys(addr, val);
2842 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2845 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2846 stl_phys(addr, val);
static CPUReadMemoryFunc *watch_mem_read[3] = { watch_mem_readb, watch_mem_readw, watch_mem_readl };

static CPUWriteMemoryFunc *watch_mem_write[3] = { watch_mem_writeb, watch_mem_writew, watch_mem_writel };
2861 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2867 idx = SUBPAGE_IDX(addr);
2868 #if defined(DEBUG_SUBPAGE)
2869 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2870 mmio, len, addr, idx);
2872 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2873 addr + mmio->region_offset[idx][0][len]);
2878 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2879 uint32_t value, unsigned int len)
2883 idx = SUBPAGE_IDX(addr);
2884 #if defined(DEBUG_SUBPAGE)
2885 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2886 mmio, len, addr, idx, value);
2888 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2889 addr + mmio->region_offset[idx][1][len],
2893 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2895 #if defined(DEBUG_SUBPAGE)
2896 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2899 return subpage_readlen(opaque, addr, 0);
2902 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2905 #if defined(DEBUG_SUBPAGE)
2906 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908 subpage_writelen(opaque, addr, value, 0);
2911 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2913 #if defined(DEBUG_SUBPAGE)
2914 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2917 return subpage_readlen(opaque, addr, 1);
2920 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2923 #if defined(DEBUG_SUBPAGE)
2924 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926 subpage_writelen(opaque, addr, value, 1);
2929 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2931 #if defined(DEBUG_SUBPAGE)
2932 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2935 return subpage_readlen(opaque, addr, 2);
2938 static void subpage_writel (void *opaque,
2939 target_phys_addr_t addr, uint32_t value)
2941 #if defined(DEBUG_SUBPAGE)
2942 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2944 subpage_writelen(opaque, addr, value, 2);
static CPUReadMemoryFunc *subpage_read[] = { &subpage_readb, &subpage_readw, &subpage_readl };

static CPUWriteMemoryFunc *subpage_write[] = { &subpage_writeb, &subpage_writew, &subpage_writel };
2959 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2960 ram_addr_t memory, ram_addr_t region_offset)
2965 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2967 idx = SUBPAGE_IDX(start);
2968 eidx = SUBPAGE_IDX(end);
2969 #if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %" PRIx64 "\n",
           __func__, mmio, start, end, idx, eidx, (uint64_t)memory);
2973 memory >>= IO_MEM_SHIFT;
2974 for (; idx <= eidx; idx++) {
2975 for (i = 0; i < 4; i++) {
2976 if (io_mem_read[memory][i]) {
2977 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2978 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2979 mmio->region_offset[idx][0][i] = region_offset;
2981 if (io_mem_write[memory][i]) {
2982 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2983 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2984 mmio->region_offset[idx][1][i] = region_offset;
2992 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2993 ram_addr_t orig_memory, ram_addr_t region_offset)
2998 mmio = qemu_mallocz(sizeof(subpage_t));
3001 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3002 #if defined(DEBUG_SUBPAGE)
3003 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3004 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3006 *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory, region_offset);
    return mmio;
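/* Illustrative sketch, not part of the original file: sub-pages arise when a
   registration covers less than a full target page, e.g. two devices sharing
   one page. dev_a/dev_b stand for io indexes from cpu_register_io_memory();
   the example_* name is hypothetical. */
#if 0
static void example_shared_page(target_phys_addr_t base, int dev_a, int dev_b)
{
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE / 2, dev_a);
    cpu_register_physical_memory(base + TARGET_PAGE_SIZE / 2,
                                 TARGET_PAGE_SIZE / 2, dev_b);
    /* both halves now dispatch through one subpage_t for this page */
}
#endif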
3013 static int get_free_io_mem_idx(void)
    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3018 if (!io_mem_used[i]) {
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3026 static void io_mem_init(void)
3030 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3031 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3032 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3036 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3037 watch_mem_write, NULL);
3039 if (kqemu_phys_ram_base) {
3040 /* alloc dirty bits array */
3041 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3042 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3047 /* mem_read and mem_write are arrays of functions containing the
3048 function to access byte (index 0), word (index 1) and dword (index
3049 2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
3054 int cpu_register_io_memory(int io_index,
3055 CPUReadMemoryFunc **mem_read,
3056 CPUWriteMemoryFunc **mem_write,
3059 int i, subwidth = 0;
3061 if (io_index <= 0) {
3062 io_index = get_free_io_mem_idx();
3066 if (io_index >= IO_MEM_NB_ENTRIES)
    for(i = 0; i < 3; i++) {
3071 if (!mem_read[i] || !mem_write[i])
3072 subwidth = IO_MEM_SUBWIDTH;
3073 io_mem_read[io_index][i] = mem_read[i];
3074 io_mem_write[io_index][i] = mem_write[i];
3076 io_mem_opaque[io_index] = opaque;
3077 return (io_index << IO_MEM_SHIFT) | subwidth;
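/* Illustrative sketch, not part of the original file: registering a trivial
   MMIO device through the function above and mapping it into the physical
   address space. All example_* names are hypothetical; passing NULL for some
   widths would set IO_MEM_SUBWIDTH, as computed above. */
#if 0
static uint32_t example_mmio_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device registers read as zero in this sketch */
}
static void example_mmio_write(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* latch val into device state ... */
}
static CPUReadMemoryFunc *example_mmio_read_fn[3] = {
    example_mmio_read, example_mmio_read, example_mmio_read,
};
static CPUWriteMemoryFunc *example_mmio_write_fn[3] = {
    example_mmio_write, example_mmio_write, example_mmio_write,
};

static void example_mmio_init(target_phys_addr_t base, void *opaque)
{
    int io = cpu_register_io_memory(0, example_mmio_read_fn,
                                    example_mmio_write_fn, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif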
3080 void cpu_unregister_io_memory(int io_table_address)
3083 int io_index = io_table_address >> IO_MEM_SHIFT;
    for (i = 0; i < 3; i++) {
3086 io_mem_read[io_index][i] = unassigned_mem_read[i];
3087 io_mem_write[io_index][i] = unassigned_mem_write[i];
3089 io_mem_opaque[io_index] = NULL;
3090 io_mem_used[io_index] = 0;
3093 #endif /* !defined(CONFIG_USER_ONLY) */
3095 /* physical memory access (slow version, mainly for debug) */
3096 #if defined(CONFIG_USER_ONLY)
3097 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3098 int len, int is_write)
3105 page = addr & TARGET_PAGE_MASK;
3106 l = (page + TARGET_PAGE_SIZE) - addr;
3109 flags = page_get_flags(page);
3110 if (!(flags & PAGE_VALID))
3113 if (!(flags & PAGE_WRITE))
3115 /* XXX: this code should not depend on lock_user */
3116 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3117 /* FIXME - should this return an error rather than just fail? */
3120 unlock_user(p, addr, l);
3122 if (!(flags & PAGE_READ))
3124 /* XXX: this code should not depend on lock_user */
3125 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3126 /* FIXME - should this return an error rather than just fail? */
3129 unlock_user(p, addr, 0);
3138 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3139 int len, int is_write)
3144 target_phys_addr_t page;
3149 page = addr & TARGET_PAGE_MASK;
3150 l = (page + TARGET_PAGE_SIZE) - addr;
3153 p = phys_page_find(page >> TARGET_PAGE_BITS);
3155 pd = IO_MEM_UNASSIGNED;
3157 pd = p->phys_offset;
3161 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3162 target_phys_addr_t addr1 = addr;
3163 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3165 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
            /* XXX: could force cpu_single_env to NULL to avoid
               potential bugs */
3168 if (l >= 4 && ((addr1 & 3) == 0)) {
3169 /* 32 bit write access */
3171 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3173 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3174 /* 16 bit write access */
3176 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3179 /* 8 bit write access */
3181 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3185 unsigned long addr1;
3186 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3188 ptr = qemu_get_ram_ptr(addr1);
3189 memcpy(ptr, buf, l);
3190 if (!cpu_physical_memory_is_dirty(addr1)) {
3191 /* invalidate code */
3192 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3194 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3195 (0xff & ~CODE_DIRTY_FLAG);
3199 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3200 !(pd & IO_MEM_ROMD)) {
3201 target_phys_addr_t addr1 = addr;
3203 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3205 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3206 if (l >= 4 && ((addr1 & 3) == 0)) {
3207 /* 32 bit read access */
3208 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3211 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3212 /* 16 bit read access */
3213 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3217 /* 8 bit read access */
3218 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3224 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3225 (addr & ~TARGET_PAGE_MASK);
3226 memcpy(buf, ptr, l);
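/* Illustrative sketch, not part of the original file: most callers go
   through the cpu_physical_memory_read/write wrappers (cpu-all.h) rather
   than calling cpu_physical_memory_rw() directly. example_peek_le32 is
   hypothetical. */
#if 0
static uint32_t example_peek_le32(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_read(addr, buf, 4);
    /* assemble the little-endian value by hand */
    return buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((uint32_t)buf[3] << 24);
}
#endif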
3235 /* used for ROM loading : can write in RAM and ROM */
3236 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3237 const uint8_t *buf, int len)
3241 target_phys_addr_t page;
3246 page = addr & TARGET_PAGE_MASK;
3247 l = (page + TARGET_PAGE_SIZE) - addr;
3250 p = phys_page_find(page >> TARGET_PAGE_BITS);
3252 pd = IO_MEM_UNASSIGNED;
3254 pd = p->phys_offset;
3257 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3258 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3259 !(pd & IO_MEM_ROMD)) {
3262 unsigned long addr1;
3263 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3265 ptr = qemu_get_ram_ptr(addr1);
3266 memcpy(ptr, buf, l);
typedef struct {
    uint8_t *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;
static BounceBuffer bounce;
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;
3288 static LIST_HEAD(map_client_list, MapClient) map_client_list
3289 = LIST_HEAD_INITIALIZER(map_client_list);
3291 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3293 MapClient *client = qemu_malloc(sizeof(*client));
3295 client->opaque = opaque;
3296 client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
3301 void cpu_unregister_map_client(void *_client)
3303 MapClient *client = (MapClient *)_client;
    LIST_REMOVE(client, link);
    qemu_free(client);
3308 static void cpu_notify_map_clients(void)
3312 while (!LIST_EMPTY(&map_client_list)) {
3313 client = LIST_FIRST(&map_client_list);
3314 client->callback(client->opaque);
3315 LIST_REMOVE(client, link);
3319 /* Map a physical memory region into a host virtual address.
3320 * May map a subset of the requested range, given by and returned in *plen.
3321 * May return NULL if resources needed to perform the mapping are exhausted.
3322 * Use only for reads OR writes - not for read-modify-write operations.
3323 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed. */
3326 void *cpu_physical_memory_map(target_phys_addr_t addr,
3327 target_phys_addr_t *plen,
3330 target_phys_addr_t len = *plen;
3331 target_phys_addr_t done = 0;
3333 uint8_t *ret = NULL;
3335 target_phys_addr_t page;
3338 unsigned long addr1;
3341 page = addr & TARGET_PAGE_MASK;
3342 l = (page + TARGET_PAGE_SIZE) - addr;
3345 p = phys_page_find(page >> TARGET_PAGE_BITS);
3347 pd = IO_MEM_UNASSIGNED;
3349 pd = p->phys_offset;
3352 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3353 if (done || bounce.buffer) {
3356 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3360 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3362 ptr = bounce.buffer;
3364 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3365 ptr = qemu_get_ram_ptr(addr1);
3369 } else if (ret + done != ptr) {
3381 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3382 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller. */
3385 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3386 int is_write, target_phys_addr_t access_len)
3388 if (buffer != bounce.buffer) {
3390 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3391 while (access_len) {
3393 l = TARGET_PAGE_SIZE;
3396 if (!cpu_physical_memory_is_dirty(addr1)) {
3397 /* invalidate code */
3398 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3400 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3401 (0xff & ~CODE_DIRTY_FLAG);
3410 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3412 qemu_free(bounce.buffer);
3413 bounce.buffer = NULL;
3414 cpu_notify_map_clients();
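/* Illustrative sketch, not part of the original file: the intended map/unmap
   pattern for zero-copy DMA, with cpu_register_map_client() as the retry
   hook for when the single bounce buffer above is busy. example_dma_write is
   hypothetical. */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *src,
                              target_phys_addr_t len)
{
    while (len) {
        target_phys_addr_t l = len;
        void *mem = cpu_physical_memory_map(addr, &l, 1 /* is_write */);

        if (!mem) {
            /* resources exhausted: register a map client and retry later */
            return;
        }
        memcpy(mem, src, l);        /* l may be less than requested */
        cpu_physical_memory_unmap(mem, l, 1, l);
        addr += l;
        src += l;
        len -= l;
    }
}
#endif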
3417 /* warning: addr must be aligned */
3418 uint32_t ldl_phys(target_phys_addr_t addr)
3426 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3428 pd = IO_MEM_UNASSIGNED;
3430 pd = p->phys_offset;
3433 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3434 !(pd & IO_MEM_ROMD)) {
3436 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3438 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3439 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3442 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3443 (addr & ~TARGET_PAGE_MASK);
3449 /* warning: addr must be aligned */
3450 uint64_t ldq_phys(target_phys_addr_t addr)
3458 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3460 pd = IO_MEM_UNASSIGNED;
3462 pd = p->phys_offset;
3465 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3466 !(pd & IO_MEM_ROMD)) {
3468 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3470 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3471 #ifdef TARGET_WORDS_BIGENDIAN
3472 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3473 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3475 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3476 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3480 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3481 (addr & ~TARGET_PAGE_MASK);
3488 uint32_t ldub_phys(target_phys_addr_t addr)
3491 cpu_physical_memory_read(addr, &val, 1);
3496 uint32_t lduw_phys(target_phys_addr_t addr)
3499 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3500 return tswap16(val);
/* warning: addr must be aligned. The ram page is not marked as dirty
3504 and the code inside is not invalidated. It is useful if the dirty
3505 bits are used to track modified PTEs */
3506 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3513 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3515 pd = IO_MEM_UNASSIGNED;
3517 pd = p->phys_offset;
3520 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3521 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3523 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3524 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3526 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3527 ptr = qemu_get_ram_ptr(addr1);
3530 if (unlikely(in_migration)) {
3531 if (!cpu_physical_memory_is_dirty(addr1)) {
3532 /* invalidate code */
3533 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3535 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3536 (0xff & ~CODE_DIRTY_FLAG);
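/* Illustrative sketch, not part of the original file: a target MMU helper
   setting the accessed/dirty bits in a guest PTE would use the _notdirty
   store above, so that the emulator's own PTE update does not show up in the
   dirty bitmap. pte_addr, pte and the 0x20 flag are hypothetical. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* accessed */);
}
#endif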
3542 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3549 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3551 pd = IO_MEM_UNASSIGNED;
3553 pd = p->phys_offset;
3556 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3557 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3559 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3560 #ifdef TARGET_WORDS_BIGENDIAN
3561 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3562 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3564 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3565 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3568 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3569 (addr & ~TARGET_PAGE_MASK);
3574 /* warning: addr must be aligned */
3575 void stl_phys(target_phys_addr_t addr, uint32_t val)
3582 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3584 pd = IO_MEM_UNASSIGNED;
3586 pd = p->phys_offset;
3589 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3590 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3592 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3593 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3595 unsigned long addr1;
3596 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3598 ptr = qemu_get_ram_ptr(addr1);
3600 if (!cpu_physical_memory_is_dirty(addr1)) {
3601 /* invalidate code */
3602 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3604 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3605 (0xff & ~CODE_DIRTY_FLAG);
3611 void stb_phys(target_phys_addr_t addr, uint32_t val)
3614 cpu_physical_memory_write(addr, &v, 1);
3618 void stw_phys(target_phys_addr_t addr, uint32_t val)
3620 uint16_t v = tswap16(val);
3621 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3625 void stq_phys(target_phys_addr_t addr, uint64_t val)
3628 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3633 /* virtual memory access for debug (includes writing to ROM) */
3634 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3635 uint8_t *buf, int len, int is_write)
3638 target_phys_addr_t phys_addr;
3642 page = addr & TARGET_PAGE_MASK;
3643 phys_addr = cpu_get_phys_page_debug(env, page);
3644 /* if no physical page mapped, return an error */
3645 if (phys_addr == -1)
3647 l = (page + TARGET_PAGE_SIZE) - addr;
3650 phys_addr += (addr & ~TARGET_PAGE_MASK);
3651 #if !defined(CONFIG_USER_ONLY)
3653 cpu_physical_memory_write_rom(phys_addr, buf, l);
3656 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
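/* Illustrative sketch, not part of the original file: the gdb stub reads
   guest virtual memory through the helper above; a monitor command could do
   the same. example_dump_word is hypothetical. */
#if 0
static uint32_t example_dump_word(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];
    uint32_t val;

    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0 /* read */) < 0)
        return 0; /* no physical page mapped at vaddr */
    memcpy(&val, buf, 4); /* raw guest bytes; caller handles endianness */
    return val;
}
#endif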
3664 /* in deterministic execution mode, instructions doing device I/Os
3665 must be at the end of the TB */
3666 void cpu_io_recompile(CPUState *env, void *retaddr)
3668 TranslationBlock *tb;
3670 target_ulong pc, cs_base;
3673 tb = tb_find_pc((unsigned long)retaddr);
3675 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3678 n = env->icount_decr.u16.low + tb->icount;
3679 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
3682 n = n - env->icount_decr.u16.low;
3683 /* Generate a new TB ending on the I/O insn. */
3685 /* On MIPS and SH, delay slot instructions can only be restarted if
3686 they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
3689 #if defined(TARGET_MIPS)
3690 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3691 env->active_tc.PC -= 4;
3692 env->icount_decr.u16.low++;
3693 env->hflags &= ~MIPS_HFLAG_BMASK;
3695 #elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
        && n > 1) {
        env->pc -= 2;
3699 env->icount_decr.u16.low++;
3700 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3703 /* This should never happen. */
3704 if (n > CF_COUNT_MASK)
3705 cpu_abort(env, "TB too big during recompile");
3707 cflags = n | CF_LAST_IO;
3709 cs_base = tb->cs_base;
3711 tb_phys_invalidate(tb, -1);
3712 /* FIXME: In theory this could raise an exception. In practice
3713 we have already translated the block once so it's probably ok. */
3714 tb_gen_code(env, pc, cs_base, flags, cflags);
3715 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3716 the first in the TB) then we end up generating a whole new TB and
3717 repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
3720 cpu_resume_from_signal(env, NULL);
3723 void dump_exec_info(FILE *f,
3724 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3726 int i, target_code_size, max_target_code_size;
3727 int direct_jmp_count, direct_jmp2_count, cross_page;
3728 TranslationBlock *tb;
3730 target_code_size = 0;
3731 max_target_code_size = 0;
3733 direct_jmp_count = 0;
3734 direct_jmp2_count = 0;
3735 for(i = 0; i < nb_tbs; i++) {
3737 target_code_size += tb->size;
3738 if (tb->size > max_target_code_size)
3739 max_target_code_size = tb->size;
3740 if (tb->page_addr[1] != -1)
3742 if (tb->tb_next_offset[0] != 0xffff) {
3744 if (tb->tb_next_offset[1] != 0xffff) {
3745 direct_jmp2_count++;
    /* XXX: avoid using doubles? */
3750 cpu_fprintf(f, "Translation buffer state:\n");
3751 cpu_fprintf(f, "gen code size %ld/%ld\n",
3752 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3753 cpu_fprintf(f, "TB count %d/%d\n",
3754 nb_tbs, code_gen_max_blocks);
3755 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3756 nb_tbs ? target_code_size / nb_tbs : 0,
3757 max_target_code_size);
3758 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3759 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3760 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3761 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3763 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3764 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3766 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3768 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3769 cpu_fprintf(f, "\nStatistics:\n");
3770 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3771 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3772 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3773 tcg_dump_info(f, cpu_fprintf);
3776 #if !defined(CONFIG_USER_ONLY)
3778 #define MMUSUFFIX _cmmu
3779 #define GETPC() NULL
3780 #define env cpu_single_env
3781 #define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"