/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;
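
/* The lock is recursive per thread: the thread-local nesting count means the
   mutex is only taken on the outermost mmap_lock() and released on the
   matching mmap_unlock(), so callers may safely nest lock/unlock pairs. */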
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
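
/* Host-side allocations must also be visible to the guest-address
   bookkeeping: the pages are marked PAGE_RESERVED below so that a later
   guest mmap() is never placed on top of memory QEMU itself is using. */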
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;

    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong)addr) {
        /* Allocated region overlaps guest address space.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }
    mmap_unlock();
    return p;
}
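
/* qemu_malloc() reserves a 16-byte header in front of the returned block and
   stores the allocation size there, so qemu_free() can recover the length it
   has to pass to munmap(). */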
void *qemu_malloc(size_t size)
{
    char *p;

    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized.  */
void *qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}

void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't do that for now.  */
    size_t *p;

    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}

/* NOTE: all the constants are the HOST ones, but addresses are target.  */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
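
    /* mprotect() can only operate on whole host pages.  When TARGET_PAGE_SIZE
       is smaller than the host page size, the first and last host pages may
       also hold target pages outside [start, end); for those pages the new
       protection is OR-ed with the flags the neighbouring pages already have. */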
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }
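
    /* Three cases follow: if no target page in this host page is mapped yet,
       a fresh anonymous host page is mapped; if the fragment is file-backed,
       the file contents are copied in with pread() because the host page
       cannot be mapped at an unaligned file offset; otherwise only the page
       protection needs updating. */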
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection so the file data can be written into the page */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif
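
/* last_brk remembers where the host program break was last seen, so that any
   memory the host allocated through brk() between guest mmap calls can be
   marked reserved before the next free-area search. */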
unsigned long last_brk;

/* find a free memory area of size 'size'.  The search starts at
   'start'.  If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           protections back and forth.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;
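
    /* Linear scan: starting from the hint (or mmap_next_start), advance one
       host page at a time until 'size' bytes worth of unflagged target pages
       are found.  If the scan wraps all the way back to its starting point,
       the address space is full and -1 is returned. */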
    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    abi_ulong addr;
    unsigned long host_start;
    int flg;

    mmap_lock();
#ifdef DEBUG_MMAP
    printf("mmap: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
           start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
    if (flags & MAP_FIXED)
        printf("MAP_FIXED ");
    if (flags & MAP_ANONYMOUS)
        printf("MAP_ANON ");
    switch(flags & MAP_TYPE) {
    case MAP_PRIVATE:
        printf("MAP_PRIVATE ");
        break;
    case MAP_SHARED:
        printf("MAP_SHARED ");
        break;
    default:
        printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
        break;
    }
    printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;

        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS))
            host_start += offset - host_offset;
        start = h2g(host_start);
        goto the_end1;
    }
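
    /* Only MAP_FIXED requests reach this point: the target address is imposed
       by the guest, so the requested range may begin or end in the middle of
       a host page.  Such partial host pages are handled with mmap_frag();
       only the host-page aligned middle is mapped directly. */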
    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    end = start + len;
    real_end = HOST_PAGE_ALIGN(end);

    /*
     * Test if requested memory area fits target address space
     * It can fail only on 64-bit host with 32-bit target.
     * On any other target/host host mmap() handles this error correctly.
     */
    if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
        errno = EINVAL;
        goto fail;
    }

    for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        flg = page_get_flags(addr);
        if (flg & PAGE_RESERVED) {
            errno = ENXIO;
            goto fail;
        }
    }

    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            errno = EINVAL;
            goto fail;
        }
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1)
            goto fail;
        pread(fd, g2h(start), len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            if (ret != 0) {
                start = ret;
                goto the_end;
            }
        }
        goto the_end;
    }

    /* handle the start of the mapping */
    if (start > real_start) {
        if (real_end == real_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(real_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            goto the_end1;
        }
        ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1)
            goto fail;
        real_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < real_end) {
        ret = mmap_frag(real_end - qemu_host_page_size,
                        real_end - qemu_host_page_size, real_end,
                        prot, flags, fd,
                        offset + real_end - qemu_host_page_size - start);
        if (ret == -1)
            goto fail;
        real_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (real_start < real_end) {
        void *p;
        unsigned long offset1;
        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + real_start - start;
        p = mmap(g2h(real_start), real_end - real_start,
                 prot, flags, fd, offset1);
        if (p == MAP_FAILED)
            goto fail;
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
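
    /* A host page that also contains target pages outside the unmapped range
       cannot be returned to the host; if such pages remain in the first or
       last host page, shrink the range so only fully covered host pages are
       actually munmap()ed.  The target-side flags are still cleared for the
       whole requested range. */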
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }
    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();
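
    /* Three cases: the caller fixed the new address (MREMAP_FIXED), the
       mapping may move (MREMAP_MAYMOVE, in which case a hole in the guest
       address space is found first and the host mremap is forced onto it
       with MREMAP_FIXED), or the mapping must be resized in place. */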
    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     new_addr);
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}