/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP
#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
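
/*
 * The per-thread lock counter above lets mmap_lock() nest: target_mmap()
 * takes the lock and, in its unaligned-offset fallback further below, calls
 * itself recursively while still holding it.
 */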
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.  Mark it
           reserved so that the guest cannot map over it.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
void *qemu_malloc(size_t size)
{
    char *p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}

void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
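
/*
 * Layout used by qemu_malloc()/qemu_free()/qemu_realloc() above: each
 * allocation is its own anonymous mapping with a 16-byte header, whose
 * first size_t records the total mapping size (header included):
 *
 *     [ size_t total size | padding to 16 bytes | user data ... ]
 *                                               ^ pointer returned to caller
 *
 * which is why the functions above step back 16 bytes from the user
 * pointer to recover the mapping size.
 */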
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
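
/*
 * Illustration of the partial-page handling above (numbers are only an
 * example): with 4K target pages on a host using 64K pages, a guest
 * mprotect() of [0x11000, 0x12000) covers just part of the host page
 * [0x10000, 0x20000).  A host page can only have one protection, so it is
 * given the requested prot OR'ed with the flags of every other target page
 * sharing it, while fully covered host pages in the middle get plain prot.
 */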
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
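
/*
 * A "fragment" is a host page only partially covered by the target mapping,
 * which can happen when TARGET_PAGE_SIZE is smaller than the host page
 * size.  The uncovered target pages in that host page must keep their
 * existing protection, so the host page ends up with the union of old and
 * new protections, and file-backed data is brought in with pread() instead
 * of being mapped directly.  target_mmap() calls this for the first and
 * last host page of an unaligned request.
 */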
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr;
    abi_ulong addr;

    size = HOST_PAGE_ALIGN(size);
    start &= qemu_host_page_mask;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0)
        start = mmap_next_start;

    addr = start;

    for(;;) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED)
            return (abi_ulong)-1;

        /* If address fits target address space we've found what we need */
        if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
            break;

        /* Unmap and try again with new page */
        munmap(ptr, size);
        addr += qemu_host_page_size;

        /* ENOMEM if we check whole of target address space */
        if (addr == start)
            return (abi_ulong)-1;
    }

    /* Update default start address */
    if (start == mmap_next_start)
        mmap_next_start = (unsigned long)ptr + size;

    return h2g(ptr);
}
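
/*
 * The PROT_NONE/MAP_NORESERVE mapping created above is only a placeholder:
 * it pins down a host address range so nothing else can grab it, and the
 * caller is expected to replace it immediately by one of the operations
 * listed in the loop comment.  target_mmap() below does exactly that with
 * mmap(MAP_FIXED) over the reserved range.
 */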
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }
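
    /*
     * Worked example of the truncation above (illustrative numbers): a
     * 100-byte file mapped at offset 0 while emulating 8K target pages on a
     * 4K host gives len = 100, then len += 4095 and len &= ~4095, so len
     * becomes 4096.  The file map stops at the first host page boundary
     * past EOF; in the non-MAP_FIXED path below the rest of the 8K target
     * page keeps the anonymous backing that is mapped first, so guest
     * accesses there do not fault with SIGBUS.
     */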
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        int flg;
        abi_ulong addr;
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles this
         * error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;

fail:
    mmap_unlock();
    return -1;
}
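
/*
 * Sketch of a typical caller (not part of this file): the TARGET_NR_mmap
 * handler in the syscall layer forwards the guest arguments here after
 * translating the flag bits, along the lines of
 *
 *     ret = target_mmap(arg1, arg2, arg3,
 *                       target_to_host_bitmask(arg4, mmap_flags_tbl),
 *                       arg5, arg6);
 *
 * (helper names shown for illustration only).
 */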
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags, g2h(new_addr));
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
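
/*
 * As in target_mmap(), the MREMAP_MAYMOVE case goes through mmap_find_vma()
 * so that a moved mapping still lands at an address reachable from the
 * guest address space; the in-place case has to re-check the result and
 * undo the resize if the kernel returns an address outside of it.
 */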
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}