#include <sys/mman.h>
#include "config.h"
-#ifdef TARGET_I386
-#include "cpu-i386.h"
-#endif
-#ifdef TARGET_ARM
-#include "cpu-arm.h"
-#endif
-#include "exec.h"
+#include "cpu.h"
+#include "exec-all.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
#define L2_SIZE (1 << L2_BITS)
static void tb_invalidate_page(unsigned long address);
+static void io_mem_init(void);
unsigned long real_host_page_size;
unsigned long host_page_bits;
static PageDesc *l1_map[L1_SIZE];
+/* io memory support */
+static unsigned long *l1_physmap[L1_SIZE];
+CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
+CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+static int io_mem_nb;
+
+/* log support */
+char *logfilename = "/tmp/qemu.log";
+FILE *logfile;
+int loglevel;
+
static void page_init(void)
{
/* NOTE: we can always suppose that host_page_size >=
if (!code_gen_ptr) {
code_gen_ptr = code_gen_buffer;
page_init();
+ io_mem_init();
}
}
prot = 0;
for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
prot |= page_get_flags(addr);
+#if !defined(CONFIG_SOFTMMU)
mprotect((void *)host_start, host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
+#endif
+#if !defined(CONFIG_USER_ONLY)
+    /* invalidate the soft TLB write entries for this page */
+    /* XXX: must flush on all processors with the same address space */
+ tlb_flush_page_write(cpu_single_env, host_start);
+#endif
#ifdef DEBUG_TB_INVALIDATE
printf("protecting code page: 0x%08lx\n",
host_start);
#endif
p->flags &= ~PAGE_WRITE;
-#ifdef DEBUG_TB_CHECK
- tb_page_check();
-#endif
}
}
if (page_index2 != page_index1) {
tb_alloc_page(tb, page_index2);
}
+#ifdef DEBUG_TB_CHECK
+ tb_page_check();
+#endif
tb->jmp_first = (TranslationBlock *)((long)tb | 2);
tb->jmp_next[0] = NULL;
tb->jmp_next[1] = NULL;
/* if the page was really writable, then we change its
protection back to writable */
if (prot & PAGE_WRITE_ORG) {
- mprotect((void *)host_start, host_page_size,
- (prot & PAGE_BITS) | PAGE_WRITE);
pindex = (address - host_start) >> TARGET_PAGE_BITS;
- p1[pindex].flags |= PAGE_WRITE;
- /* and since the content will be modified, we must invalidate
- the corresponding translated code. */
- tb_invalidate_page(address);
+ if (!(p1[pindex].flags & PAGE_WRITE)) {
+#if !defined(CONFIG_SOFTMMU)
+ mprotect((void *)host_start, host_page_size,
+ (prot & PAGE_BITS) | PAGE_WRITE);
+#endif
+ p1[pindex].flags |= PAGE_WRITE;
+ /* and since the content will be modified, we must invalidate
+ the corresponding translated code. */
+ tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
- tb_invalidate_check(address);
+ tb_invalidate_check(address);
#endif
- return 1;
- } else {
- return 0;
+ return 1;
+ }
}
+ return 0;
}
/* call this function when system calls directly modify a memory area */
tb_reset_jump_recursive2(tb, 1);
}
-void cpu_interrupt(CPUState *env)
+/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
+ breakpoint is reached */
+int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
- TranslationBlock *tb;
+#if defined(TARGET_I386)
+ int i;
+
+ for(i = 0; i < env->nb_breakpoints; i++) {
+ if (env->breakpoints[i] == pc)
+ return 0;
+ }
+
+ if (env->nb_breakpoints >= MAX_BREAKPOINTS)
+ return -1;
+ env->breakpoints[env->nb_breakpoints++] = pc;
+ tb_invalidate_page(pc);
+ return 0;
+#else
+ return -1;
+#endif
+}
- env->interrupt_request = 1;
+/* remove a breakpoint */
+int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
+{
+#if defined(TARGET_I386)
+ int i;
+ for(i = 0; i < env->nb_breakpoints; i++) {
+ if (env->breakpoints[i] == pc)
+ goto found;
+ }
+ return -1;
+ found:
+ memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
+ (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
+ env->nb_breakpoints--;
+ tb_invalidate_page(pc);
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+/* enable or disable single step mode. EXCP_DEBUG is returned by the
+ CPU loop after each instruction */
+void cpu_single_step(CPUState *env, int enabled)
+{
+#if defined(TARGET_I386)
+ if (env->singlestep_enabled != enabled) {
+ env->singlestep_enabled = enabled;
+        /* must flush all the translated code to avoid inconsistencies */
+ tb_flush();
+ }
+#endif
+}
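+
+/* Illustrative sketch of how the debug interface above could be
+   driven; the 'env' pointer and the address are hypothetical:
+
+     cpu_breakpoint_insert(env, 0x00001000);
+     cpu_single_step(env, 1);    /* EXCP_DEBUG after each insn */
+     cpu_single_step(env, 0);
+     cpu_breakpoint_remove(env, 0x00001000);
+*/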
+
+/* enable or disable low-level logging */
+void cpu_set_log(int log_flags)
+{
+ loglevel = log_flags;
+ if (loglevel && !logfile) {
+ logfile = fopen(logfilename, "w");
+ if (!logfile) {
+ perror(logfilename);
+ _exit(1);
+ }
+ setvbuf(logfile, NULL, _IOLBF, 0);
+ }
+}
+
+void cpu_set_log_filename(const char *filename)
+{
+ logfilename = strdup(filename);
+}
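+
+/* Illustrative usage sketch (the path and flag value are arbitrary;
+   the bits of 'loglevel' are interpreted by the code that tests it):
+
+     cpu_set_log_filename("/tmp/qemu-trace.log");
+     cpu_set_log(1);
+*/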
+
+/* mask must never be zero */
+void cpu_interrupt(CPUState *env, int mask)
+{
+ TranslationBlock *tb;
+
+ env->interrupt_request |= mask;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
tb = env->current_tb;
/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
- PageDesc *p, *pmap;
- unsigned long addr;
- int i, j, ret, j1;
+ PageDesc *pmap;
+ int i;
for(i = 0; i < L1_SIZE; i++) {
pmap = l1_map[i];
if (pmap) {
+#if !defined(CONFIG_SOFTMMU)
+ PageDesc *p;
+ unsigned long addr;
+ int j, ret, j1;
+
p = pmap;
for(j = 0;j < L2_SIZE;) {
if (p->flags & PAGE_VALID) {
j++;
}
}
+#endif
free(pmap);
l1_map[i] = NULL;
}
tb_flush();
}
#endif
+
+void tlb_flush(CPUState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+ for(i = 0; i < CPU_TLB_SIZE; i++) {
+ env->tlb_read[0][i].address = -1;
+ env->tlb_write[0][i].address = -1;
+ env->tlb_read[1][i].address = -1;
+ env->tlb_write[1][i].address = -1;
+ }
+#endif
+}
+
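+/* invalidate a single TLB entry if it maps 'addr'. Comparing against
+   the address field with TLB_INVALID_MASK included means entries that
+   are already marked invalid never match a page-aligned address. */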
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
+{
+ if (addr == (tlb_entry->address &
+ (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+ tlb_entry->address = -1;
+}
+
+void tlb_flush_page(CPUState *env, uint32_t addr)
+{
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+
+ addr &= TARGET_PAGE_MASK;
+ i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_flush_entry(&env->tlb_read[0][i], addr);
+ tlb_flush_entry(&env->tlb_write[0][i], addr);
+ tlb_flush_entry(&env->tlb_read[1][i], addr);
+ tlb_flush_entry(&env->tlb_write[1][i], addr);
+#endif
+}
+
+/* make all writes to page 'addr' trigger a TLB exception, so that
+   self-modifying code can be detected */
+void tlb_flush_page_write(CPUState *env, uint32_t addr)
+{
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+
+ addr &= TARGET_PAGE_MASK;
+ i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_flush_entry(&env->tlb_write[0][i], addr);
+ tlb_flush_entry(&env->tlb_write[1][i], addr);
+#endif
+}
+
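+/* The physical page map is a two-level table indexed by the target
+   page number: l1_physmap[] points to L2_SIZE-entry arrays whose
+   slots hold the phys_offset (or I/O descriptor) of each page. */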
+static inline unsigned long *physpage_find_alloc(unsigned int page)
+{
+ unsigned long **lp, *p;
+ unsigned int index, i;
+
+ index = page >> TARGET_PAGE_BITS;
+ lp = &l1_physmap[index >> L2_BITS];
+ p = *lp;
+ if (!p) {
+ /* allocate if not found */
+ p = malloc(sizeof(unsigned long) * L2_SIZE);
+ for(i = 0; i < L2_SIZE; i++)
+ p[i] = IO_MEM_UNASSIGNED;
+ *lp = p;
+ }
+ return p + (index & (L2_SIZE - 1));
+}
+
+/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
+unsigned long physpage_find(unsigned long page)
+{
+ unsigned long *p;
+ unsigned int index;
+ index = page >> TARGET_PAGE_BITS;
+ p = l1_physmap[index >> L2_BITS];
+ if (!p)
+ return IO_MEM_UNASSIGNED;
+ return p[index & (L2_SIZE - 1)];
+}
+
+/* register physical memory. 'size' must be a multiple of the target
+   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+   I/O memory page and phys_offset is a value returned by
+   cpu_register_io_memory() */
+void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
+ long phys_offset)
+{
+ unsigned long addr, end_addr;
+ unsigned long *p;
+
+ end_addr = start_addr + size;
+ for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
+ p = physpage_find_alloc(addr);
+ *p = phys_offset;
+ if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
+ phys_offset += TARGET_PAGE_SIZE;
+ }
+}
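+
+/* Illustrative sketch (the address and size are hypothetical):
+   registering 32MB of RAM starting at physical address 0, with the
+   pages taking RAM offsets 0, TARGET_PAGE_SIZE, 2*TARGET_PAGE_SIZE, ...
+
+     cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024, 0);
+*/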
+
+static uint32_t unassigned_mem_readb(uint32_t addr)
+{
+ return 0;
+}
+
+static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
+{
+}
+
+static CPUReadMemoryFunc *unassigned_mem_read[3] = {
+ unassigned_mem_readb,
+ unassigned_mem_readb,
+ unassigned_mem_readb,
+};
+
+static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
+ unassigned_mem_writeb,
+ unassigned_mem_writeb,
+ unassigned_mem_writeb,
+};
+
+
+static void io_mem_init(void)
+{
+ io_mem_nb = 1;
+ cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
+}
+
+/* mem_read and mem_write are arrays of functions giving access to a
+   byte (index 0), word (index 1) and dword (index 2). All functions
+   must be supplied. If io_index is non-zero, the corresponding I/O
+   zone is modified. If it is zero, a new I/O zone is allocated. The
+   return value can be used with cpu_register_physical_memory().
+   -1 is returned on error. */
+int cpu_register_io_memory(int io_index,
+ CPUReadMemoryFunc **mem_read,
+ CPUWriteMemoryFunc **mem_write)
+{
+ int i;
+
+    if (io_index <= 0) {
+        /* allocate a new I/O zone */
+        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
+            return -1;
+ io_index = io_mem_nb++;
+ } else {
+ if (io_index >= IO_MEM_NB_ENTRIES)
+ return -1;
+ }
+
+ for(i = 0;i < 3; i++) {
+ io_mem_read[io_index][i] = mem_read[i];
+ io_mem_write[io_index][i] = mem_write[i];
+ }
+ return io_index << IO_MEM_SHIFT;
+}
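+
+/* Illustrative sketch of registering an I/O region (the handler names
+   and the physical address are hypothetical):
+
+     static CPUReadMemoryFunc *mydev_read[3] = {
+         mydev_readb, mydev_readw, mydev_readl,
+     };
+     static CPUWriteMemoryFunc *mydev_write[3] = {
+         mydev_writeb, mydev_writew, mydev_writel,
+     };
+
+     int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
+     cpu_register_physical_memory(0xfe000000, 0x1000, iomemtype);
+*/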
+
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _cmmu
+#define GETPC() NULL
+#define env cpu_single_env
+
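+/* instantiate the code-fetch soft MMU accessors for byte (SHIFT 0),
+   word (SHIFT 1), long (SHIFT 2) and quad (SHIFT 3) accesses; the
+   _cmmu suffix keeps them separate from the data access helpers
+   generated elsewhere */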
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#undef env
+
+#endif