#include "cpu.h"
#include "exec-all.h"
+#if defined(CONFIG_USER_ONLY)
+#include <qemu.h>
+#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
void **lp, **p;
+ PhysPageDesc *pd;
p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
}
#endif
lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
- p = *lp;
- if (!p) {
+ pd = *lp;
+ if (!pd) {
+ int i;
/* allocate if not found */
if (!alloc)
return NULL;
- p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
- memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
- *lp = p;
+ pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
+ *lp = pd;
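+ /* initialize every new descriptor as IO_MEM_UNASSIGNED rather than
+ zero-filling: a phys_offset of 0 would alias the first RAM page */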
+ for (i = 0; i < L2_SIZE; i++)
+ pd[i].phys_offset = IO_MEM_UNASSIGNED;
}
- return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
+ return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
- unsigned int n, unsigned int page_addr)
+ unsigned int n, target_ulong page_addr)
{
PageDesc *p;
TranslationBlock *last_first_tb;
#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
- unsigned long host_start, host_end, addr;
+ target_ulong addr;
+ PageDesc *p2;
int prot;
/* force the host page as non writable (writes will have a
page fault + mprotect overhead) */
- host_start = page_addr & qemu_host_page_mask;
- host_end = host_start + qemu_host_page_size;
+ page_addr &= qemu_host_page_mask;
prot = 0;
- for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
- prot |= page_get_flags(addr);
- mprotect((void *)host_start, qemu_host_page_size,
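+ /* a host page may span several target pages: accumulate the
+ protection flags of each one and clear their PAGE_WRITE bit before
+ write-protecting the whole host page */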
+ for(addr = page_addr; addr < page_addr + qemu_host_page_size;
+ addr += TARGET_PAGE_SIZE) {
+
+ p2 = page_find (addr >> TARGET_PAGE_BITS);
+ if (!p2)
+ continue;
+ prot |= p2->flags;
+ p2->flags &= ~PAGE_WRITE;
+ page_get_flags(addr);
+ }
+ mprotect(g2h(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
printf("protecting code page: 0x%08lx\n",
- host_start);
+ page_addr);
#endif
- p->flags &= ~PAGE_WRITE;
}
#else
/* if some code is already present, then the pages are already
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
- target_ulong phys_addr;
+ target_ulong addr, pd;
+ ram_addr_t ram_addr;
+ PhysPageDesc *p;
- phys_addr = cpu_get_phys_page_debug(env, pc);
- tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
+ addr = cpu_get_phys_page_debug(env, pc);
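+ /* look up the page's phys_offset so the invalidation is done on the
+ ram address backing the breakpoint, not the raw physical address */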
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+ ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
env->current_tb = NULL;
for(i = 0; i < CPU_TLB_SIZE; i++) {
- env->tlb_read[0][i].address = -1;
- env->tlb_write[0][i].address = -1;
- env->tlb_read[1][i].address = -1;
- env->tlb_write[1][i].address = -1;
+ env->tlb_table[0][i].addr_read = -1;
+ env->tlb_table[0][i].addr_write = -1;
+ env->tlb_table[0][i].addr_code = -1;
+ env->tlb_table[1][i].addr_read = -1;
+ env->tlb_table[1][i].addr_write = -1;
+ env->tlb_table[1][i].addr_code = -1;
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
- if (addr == (tlb_entry->address &
- (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
- tlb_entry->address = -1;
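+ /* an entry now caches separate read, write and code addresses:
+ if any of them matches the page, invalidate all three */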
+ if (addr == (tlb_entry->addr_read &
+ (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+ addr == (tlb_entry->addr_write &
+ (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+ addr == (tlb_entry->addr_code &
+ (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ tlb_entry->addr_read = -1;
+ tlb_entry->addr_write = -1;
+ tlb_entry->addr_code = -1;
+ }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_flush_entry(&env->tlb_read[0][i], addr);
- tlb_flush_entry(&env->tlb_write[0][i], addr);
- tlb_flush_entry(&env->tlb_read[1][i], addr);
- tlb_flush_entry(&env->tlb_write[1][i], addr);
+ tlb_flush_entry(&env->tlb_table[0][i], addr);
+ tlb_flush_entry(&env->tlb_table[1][i], addr);
for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
tb = env->tb_jmp_cache[i];
unsigned long start, unsigned long length)
{
unsigned long addr;
- if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
- addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+ if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+ addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
- tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
}
}
}
start1 = start + (unsigned long)phys_ram_base;
for(env = first_cpu; env != NULL; env = env->next_cpu) {
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
+ tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+ tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
}
#if !defined(CONFIG_SOFTMMU)
{
ram_addr_t ram_addr;
- if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
- ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
+ if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+ ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
tlb_entry->addend - (unsigned long)phys_ram_base;
if (!cpu_physical_memory_is_dirty(ram_addr)) {
- tlb_entry->address |= IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
}
}
}
{
int i;
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_write[0][i]);
+ tlb_update_dirty(&env->tlb_table[0][i]);
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_update_dirty(&env->tlb_write[1][i]);
+ tlb_update_dirty(&env->tlb_table[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
unsigned long start)
{
unsigned long addr;
- if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
- addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+ if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
+ addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if (addr == start) {
- tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
+ tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
}
}
}
addr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_set_dirty1(&env->tlb_write[0][i], addr);
- tlb_set_dirty1(&env->tlb_write[1][i], addr);
+ tlb_set_dirty1(&env->tlb_table[0][i], addr);
+ tlb_set_dirty1(&env->tlb_table[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
is permitted. Return 0 if OK or 2 if the page could not be mapped
(can only happen in non SOFTMMU mode for I/O pages or pages
conflicting with the host address space). */
-int tlb_set_page(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int is_user, int is_softmmu)
{
PhysPageDesc *p;
unsigned long pd;
target_ulong address;
target_phys_addr_t addend;
int ret;
+ CPUTLBEntry *te;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
}
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
- vaddr, paddr, prot, is_user, is_softmmu, pd);
+ vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif
ret = 0;
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
addend -= vaddr;
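+ /* a single CPUTLBEntry holds the shared addend plus separate
+ read, write and code addresses, filled in independently below */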
+ te = &env->tlb_table[is_user][index];
+ te->addend = addend;
if (prot & PAGE_READ) {
- env->tlb_read[is_user][index].address = address;
- env->tlb_read[is_user][index].addend = addend;
+ te->addr_read = address;
} else {
- env->tlb_read[is_user][index].address = -1;
- env->tlb_read[is_user][index].addend = -1;
+ te->addr_read = -1;
+ }
+ if (prot & PAGE_EXEC) {
+ te->addr_code = address;
+ } else {
+ te->addr_code = -1;
}
if (prot & PAGE_WRITE) {
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
/* ROM: access is ignored (same as unassigned) */
- env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
- env->tlb_write[is_user][index].addend = addend;
+ te->addr_write = vaddr | IO_MEM_ROM;
} else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
!cpu_physical_memory_is_dirty(pd)) {
- env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
- env->tlb_write[is_user][index].addend = addend;
+ te->addr_write = vaddr | IO_MEM_NOTDIRTY;
} else {
- env->tlb_write[is_user][index].address = address;
- env->tlb_write[is_user][index].addend = addend;
+ te->addr_write = address;
}
} else {
- env->tlb_write[is_user][index].address = -1;
- env->tlb_write[is_user][index].addend = -1;
+ te->addr_write = -1;
}
}
#if !defined(CONFIG_SOFTMMU)
/* called from signal handler: invalidate the code and unprotect the
page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
+int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
VirtPageDesc *vp;
{
}
-int tlb_set_page(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int is_user, int is_softmmu)
{
return 0;
}
}
}
-int page_get_flags(unsigned long address)
+int page_get_flags(target_ulong address)
{
PageDesc *p;
/* modify the flags of a page and invalidate the code if
necessary. The flag PAGE_WRITE_ORG is positioned automatically
depending on PAGE_WRITE */
-void page_set_flags(unsigned long start, unsigned long end, int flags)
+void page_set_flags(target_ulong start, target_ulong end, int flags)
{
PageDesc *p;
- unsigned long addr;
+ target_ulong addr;
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
/* called from signal handler: invalidate the code and unprotect the
page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long address, unsigned long pc, void *puc)
+int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
unsigned int page_index, prot, pindex;
PageDesc *p, *p1;
- unsigned long host_start, host_end, addr;
+ target_ulong host_start, host_end, addr;
host_start = address & qemu_host_page_mask;
page_index = host_start >> TARGET_PAGE_BITS;
if (prot & PAGE_WRITE_ORG) {
pindex = (address - host_start) >> TARGET_PAGE_BITS;
if (!(p1[pindex].flags & PAGE_WRITE)) {
- mprotect((void *)host_start, qemu_host_page_size,
+ mprotect((void *)g2h(host_start), qemu_host_page_size,
(prot & PAGE_BITS) | PAGE_WRITE);
p1[pindex].flags |= PAGE_WRITE;
/* and since the content will be modified, we must invalidate
}
/* call this function when system calls directly modify a memory area */
-void page_unprotect_range(uint8_t *data, unsigned long data_size)
+/* ??? This should be redundant now we have lock_user. */
+void page_unprotect_range(target_ulong data, target_ulong data_size)
{
- unsigned long start, end, addr;
+ target_ulong start, end, addr;
- start = (unsigned long)data;
+ start = data;
end = start + data_size;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
#endif
}
stb_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
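+ /* notify kqemu of the write unless the page already has all of its
+ KQEMU_MODIFY_PAGE_MASK bits set in dirty_flags */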
+ if (cpu_single_env->kqemu_enabled &&
+ (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+ kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
#endif
}
stw_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
+ if (cpu_single_env->kqemu_enabled &&
+ (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+ kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
#endif
}
stl_p((uint8_t *)(long)addr, val);
+#ifdef USE_KQEMU
+ if (cpu_single_env->kqemu_enabled &&
+ (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
+ kqemu_modify_page(cpu_single_env, ram_addr);
+#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
{
int l, flags;
target_ulong page;
+ void * p;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
if (is_write) {
if (!(flags & PAGE_WRITE))
return;
- memcpy((uint8_t *)addr, buf, len);
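+ /* map the guest range to a host pointer with lock_user() instead of
+ dereferencing the guest address directly */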
+ p = lock_user(addr, len, 0);
+ memcpy(p, buf, len);
+ unlock_user(p, addr, len);
} else {
if (!(flags & PAGE_READ))
return;
- memcpy(buf, (uint8_t *)addr, len);
+ p = lock_user(addr, len, 1);
+ memcpy(buf, p, len);
+ unlock_user(p, addr, 0);
}
len -= l;
buf += l;
return val;
}
+/* warning: addr must be aligned */
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+ int io_index;
+ uint8_t *ptr;
+ uint64_t val;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
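+ /* compose the 64-bit value from two 32-bit MMIO reads, ordered
+ according to the target endianness */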
+#ifdef TARGET_WORDS_BIGENDIAN
+ val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
+ val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+#else
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+#endif
+ } else {
+ /* RAM case */
+ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ val = ldq_p(ptr);
+ }
+ return val;
+}
+
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
return tswap16(val);
}
-/* XXX: optimize */
-uint64_t ldq_phys(target_phys_addr_t addr)
-{
- uint64_t val;
- cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
- return tswap64(val);
-}
-
/* warning: addr must be aligned. The ram page is not masked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */