#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
-TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
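+/* list of all the CPUs, linked through their next_cpu field */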
+CPUState *first_cpu;
+/* current CPU in the current thread. It is only valid inside
+ cpu_exec() */
+CPUState *cpu_single_env;
+
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
uint32_t phys_offset;
} PhysPageDesc;
-/* Note: the VirtPage handling is absolete and will be suppressed
- ASAP */
-typedef struct VirtPageDesc {
- /* physical address of code page. It is valid only if 'valid_tag'
- matches 'virt_valid_tag' */
- target_ulong phys_addr;
- unsigned int valid_tag;
-#if !defined(CONFIG_SOFTMMU)
- /* original page access rights. It is valid only if 'valid_tag'
- matches 'virt_valid_tag' */
- unsigned int prot;
-#endif
-} VirtPageDesc;
-
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
-#if !defined(CONFIG_USER_ONLY)
-#if TARGET_LONG_BITS > 32
-#define VIRT_L_BITS 9
-#define VIRT_L_SIZE (1 << VIRT_L_BITS)
-static void *l1_virt_map[VIRT_L_SIZE];
-#else
-static VirtPageDesc *l1_virt_map[L1_SIZE];
-#endif
-static unsigned int virt_valid_tag;
-#endif
-
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
while ((1 << qemu_host_page_bits) < qemu_host_page_size)
qemu_host_page_bits++;
qemu_host_page_mask = ~(qemu_host_page_size - 1);
-#if !defined(CONFIG_USER_ONLY)
- virt_valid_tag = 1;
-#endif
l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
}
#if !defined(CONFIG_USER_ONLY)
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
- target_ulong vaddr);
+static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr);
-
-static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
-{
-#if TARGET_LONG_BITS > 32
- void **p, **lp;
-
- p = l1_virt_map;
- lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
- p = *lp;
- if (!p) {
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
- *lp = p;
- }
- lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
- p = *lp;
- if (!p) {
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
- *lp = p;
- }
- lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
- p = *lp;
- if (!p) {
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
- *lp = p;
- }
- lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
- p = *lp;
- if (!p) {
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
- *lp = p;
- }
- lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
- p = *lp;
- if (!p) {
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
- *lp = p;
- }
- return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
-#else
- VirtPageDesc *p, **lp;
-
- lp = &l1_virt_map[index >> L2_BITS];
- p = *lp;
- if (!p) {
- /* allocate if not found */
- if (!alloc)
- return NULL;
- p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
- *lp = p;
- }
- return p + (index & (L2_SIZE - 1));
-#endif
-}
-
-static inline VirtPageDesc *virt_page_find(target_ulong index)
-{
- return virt_page_find_alloc(index, 0);
-}
-
-#if TARGET_LONG_BITS > 32
-static void virt_page_flush_internal(void **p, int level)
-{
- int i;
- if (level == 0) {
- VirtPageDesc *q = (VirtPageDesc *)p;
- for(i = 0; i < VIRT_L_SIZE; i++)
- q[i].valid_tag = 0;
- } else {
- level--;
- for(i = 0; i < VIRT_L_SIZE; i++) {
- if (p[i])
- virt_page_flush_internal(p[i], level);
- }
- }
-}
#endif
-static void virt_page_flush(void)
-{
- virt_valid_tag++;
-
- if (virt_valid_tag == 0) {
- virt_valid_tag = 1;
-#if TARGET_LONG_BITS > 32
- virt_page_flush_internal(l1_virt_map, 5);
-#else
- {
- int i, j;
- VirtPageDesc *p;
- for(i = 0; i < L1_SIZE; i++) {
- p = l1_virt_map[i];
- if (p) {
- for(j = 0; j < L2_SIZE; j++)
- p[j].valid_tag = 0;
- }
- }
- }
-#endif
- }
-}
-#else
-static void virt_page_flush(void)
+void cpu_exec_init(CPUState *env)
{
-}
-#endif
-void cpu_exec_init(void)
-{
+    CPUState **penv;
+    int cpu_index;
if (!code_gen_ptr) {
code_gen_ptr = code_gen_buffer;
page_init();
io_mem_init();
}
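+    /* link the new CPU at the tail of the global CPU list and give it
+       the next free index; each target is expected to call this from
+       its cpu init function (e.g. cpu_x86_init on i386) */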
+ env->next_cpu = NULL;
+ penv = &first_cpu;
+ cpu_index = 0;
+ while (*penv != NULL) {
+ penv = (CPUState **)&(*penv)->next_cpu;
+ cpu_index++;
+ }
+ env->cpu_index = cpu_index;
+ *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env)
+void tb_flush(CPUState *env1)
{
+ CPUState *env;
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
code_gen_ptr - code_gen_buffer,
nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
nb_tbs = 0;
- memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
- virt_page_flush();
+
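+    /* the TB jump cache is now per-CPU, so it must be cleared on
+       every CPU, not only the current one */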
+ for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+ }
memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
page_flush_tb();
tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
-static inline void tb_invalidate(TranslationBlock *tb)
+static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
+ CPUState *env;
+ PageDesc *p;
unsigned int h, n1;
- TranslationBlock *tb1, *tb2, **ptb;
+ target_ulong phys_pc;
+ TranslationBlock *tb1, *tb2;
+ /* remove the TB from the hash list */
+ phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+ h = tb_phys_hash_func(phys_pc);
+ tb_remove(&tb_phys_hash[h], tb,
+ offsetof(TranslationBlock, phys_hash_next));
+
+ /* remove the TB from the page list */
+ if (tb->page_addr[0] != page_addr) {
+ p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+ tb_page_remove(&p->first_tb, tb);
+ invalidate_page_bitmap(p);
+ }
+ if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
+ p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+ tb_page_remove(&p->first_tb, tb);
+ invalidate_page_bitmap(p);
+ }
+
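+    /* signal cpu_exec() that TB pointers obtained before this point
+       may be stale */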
tb_invalidated_flag = 1;
-    /* remove the TB from the hash list */
+    /* remove the TB from each CPU's tb_jmp_cache */
- h = tb_hash_func(tb->pc);
- ptb = &tb_hash[h];
- for(;;) {
- tb1 = *ptb;
- /* NOTE: the TB is not necessarily linked in the hash. It
- indicates that it is not currently used */
- if (tb1 == NULL)
- return;
- if (tb1 == tb) {
- *ptb = tb1->hash_next;
- break;
- }
- ptb = &tb1->hash_next;
+ h = tb_jmp_cache_hash_func(tb->pc);
+ for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ if (env->tb_jmp_cache[h] == tb)
+ env->tb_jmp_cache[h] = NULL;
}
/* suppress this TB from the two jump lists */
tb1 = tb2;
}
tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
-}
-static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
-{
- PageDesc *p;
- unsigned int h;
- target_ulong phys_pc;
-
- /* remove the TB from the hash list */
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_phys_hash_func(phys_pc);
- tb_remove(&tb_phys_hash[h], tb,
- offsetof(TranslationBlock, phys_hash_next));
-
- /* remove the TB from the page list */
- if (tb->page_addr[0] != page_addr) {
- p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
- tb_page_remove(&p->first_tb, tb);
- invalidate_page_bitmap(p);
- }
- if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
- p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
- tb_page_remove(&p->first_tb, tb);
- invalidate_page_bitmap(p);
- }
-
- tb_invalidate(tb);
tb_phys_invalidate_count++;
}
protected. So we handle the case where only the first TB is
allocated in a physical page */
if (!last_first_tb) {
- target_ulong virt_addr;
-
- virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
- tlb_protect_code(cpu_single_env, page_addr, virt_addr);
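+        /* protection is now keyed on the physical page only, so one
+           call covers every virtual mapping on every CPU */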
+ tlb_protect_code(page_addr);
}
#endif
tb_alloc_page(tb, 1, phys_page2);
else
tb->page_addr[1] = -1;
-#ifdef DEBUG_TB_CHECK
- tb_page_check();
-#endif
-}
-
-/* link the tb with the other TBs */
-void tb_link(TranslationBlock *tb)
-{
-#if !defined(CONFIG_USER_ONLY)
- {
- VirtPageDesc *vp;
- target_ulong addr;
-
- /* save the code memory mappings (needed to invalidate the code) */
- addr = tb->pc & TARGET_PAGE_MASK;
- vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
- if (vp->valid_tag == virt_valid_tag &&
- vp->phys_addr != tb->page_addr[0]) {
- printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
- addr, tb->page_addr[0], vp->phys_addr);
- }
-#endif
- vp->phys_addr = tb->page_addr[0];
- if (vp->valid_tag != virt_valid_tag) {
- vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
- vp->prot = 0;
-#endif
- }
-
- if (tb->page_addr[1] != -1) {
- addr += TARGET_PAGE_SIZE;
- vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
- if (vp->valid_tag == virt_valid_tag &&
- vp->phys_addr != tb->page_addr[1]) {
- printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
- addr, tb->page_addr[1], vp->phys_addr);
- }
-#endif
- vp->phys_addr = tb->page_addr[1];
- if (vp->valid_tag != virt_valid_tag) {
- vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
- vp->prot = 0;
-#endif
- }
- }
- }
-#endif
tb->jmp_first = (TranslationBlock *)((long)tb | 2);
tb->jmp_next[0] = NULL;
tb_reset_jump(tb, 0);
if (tb->tb_next_offset[1] != 0xffff)
tb_reset_jump(tb, 1);
+
+#ifdef DEBUG_TB_CHECK
+ tb_page_check();
+#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
env->tlb_write[1][i].address = -1;
}
- virt_page_flush();
- memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
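+    /* the TB jump cache is indexed by virtual PC, so it must be
+       flushed together with the TLB */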
+ memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
#if !defined(CONFIG_SOFTMMU)
munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
void tlb_flush_page(CPUState *env, target_ulong addr)
{
- int i, n;
- VirtPageDesc *vp;
- PageDesc *p;
+ int i;
TranslationBlock *tb;
#if defined(DEBUG_TLB)
tlb_flush_entry(&env->tlb_read[1][i], addr);
tlb_flush_entry(&env->tlb_write[1][i], addr);
- /* remove from the virtual pc hash table all the TB at this
- virtual address */
-
- vp = virt_page_find(addr >> TARGET_PAGE_BITS);
- if (vp && vp->valid_tag == virt_valid_tag) {
- p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
- if (p) {
- /* we remove all the links to the TBs in this virtual page */
- tb = p->first_tb;
- while (tb != NULL) {
- n = (long)tb & 3;
- tb = (TranslationBlock *)((long)tb & ~3);
- if ((tb->pc & TARGET_PAGE_MASK) == addr ||
- ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
- tb_invalidate(tb);
- }
- tb = tb->page_next[n];
- }
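+    /* a TB may span two pages: drop any cached TB whose first or
+       last byte falls in the flushed page */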
+ for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+ tb = env->tb_jmp_cache[i];
+ if (tb &&
+ ((tb->pc & TARGET_PAGE_MASK) == addr ||
+ ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
+ env->tb_jmp_cache[i] = NULL;
}
- vp->valid_tag = 0;
}
#if !defined(CONFIG_SOFTMMU)
#endif
}
-static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
-{
- if (addr == (tlb_entry->address &
- (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
- (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
- tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
- }
-}
-
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
-static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
- target_ulong vaddr)
+static void tlb_protect_code(ram_addr_t ram_addr)
{
- int i;
-
- vaddr &= TARGET_PAGE_MASK;
- i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_protect_code1(&env->tlb_write[0][i], vaddr);
- tlb_protect_code1(&env->tlb_write[1][i], vaddr);
-
-#ifdef USE_KQEMU
- if (env->kqemu_enabled) {
- kqemu_set_notdirty(env, ram_addr);
- }
-#endif
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;
-
-#if !defined(CONFIG_SOFTMMU)
- /* NOTE: as we generated the code for this page, it is already at
- least readable */
- if (vaddr < MMAP_AREA_END)
- mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
-#endif
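+    /* clearing CODE_DIRTY_FLAG updates every CPU's TLB so that the
+       next write to this page is trapped by the notdirty handlers */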
+ cpu_physical_memory_reset_dirty(ram_addr,
+ ram_addr + TARGET_PAGE_SIZE,
+ CODE_DIRTY_FLAG);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
if (length == 0)
return;
len = length >> TARGET_PAGE_BITS;
- env = cpu_single_env;
#ifdef USE_KQEMU
+ /* XXX: should not depend on cpu context */
+ env = first_cpu;
if (env->kqemu_enabled) {
ram_addr_t addr;
addr = start;
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
start1 = start + (unsigned long)phys_ram_base;
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
- for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
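+    /* each CPU caches dirty state in its TLB, so all of them must be
+       updated, not just the current one */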
+ for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+ }
#if !defined(CONFIG_SOFTMMU)
/* XXX: this is expensive */
/* update the TLB corresponding to virtual page vaddr and phys addr
addr so that it is no longer dirty */
-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+ unsigned long addr, target_ulong vaddr)
{
- CPUState *env = cpu_single_env;
int i;
addr &= TARGET_PAGE_MASK;
}
}
-static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUState *env,
+ unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ /* XXX: could force cpu_single_env to NULL to avoid
+ potential bugs */
if (l >= 4 && ((addr & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);