#define SMC_BITMAP_USE_THRESHOLD 10
-#define MMAP_AREA_START 0x00000000
-#define MMAP_AREA_END 0xa8000000
-
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-static int io_mem_nb;
+static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
return NULL;
pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
*lp = pd;
- for (i = 0; i < L2_SIZE; i++)
+ for (i = 0; i < L2_SIZE; i++) {
pd[i].phys_offset = IO_MEM_UNASSIGNED;
+ pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
+ }
}
return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
exit(1);
}
}
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
{
int flags;
void *addr = NULL;
}
#else
code_gen_buffer = qemu_malloc(code_gen_buffer_size);
- if (!code_gen_buffer) {
- fprintf(stderr, "Could not allocate dynamic translator buffer\n");
- exit(1);
- }
map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
qemu_get_be32s(f, &env->halted);
qemu_get_be32s(f, &env->interrupt_request);
+ /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+ version_id is increased. */
+ env->interrupt_request &= ~0x01;
tlb_flush(env, 1);
return 0;
CPUState **penv;
int cpu_index;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
env->next_cpu = NULL;
penv = &first_cpu;
cpu_index = 0;
TAILQ_INIT(&env->breakpoints);
TAILQ_INIT(&env->watchpoints);
*penv = env;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
cpu_common_save, cpu_common_load, env);
TranslationBlock *tb;
p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
- if (!p->code_bitmap)
- return;
tb = p->first_tb;
while (tb != NULL) {
return -EINVAL;
}
wp = qemu_malloc(sizeof(*wp));
- if (!wp)
- return -ENOMEM;
wp->vaddr = addr;
wp->len_mask = len_mask;
CPUBreakpoint *bp;
bp = qemu_malloc(sizeof(*bp));
- if (!bp)
- return -ENOMEM;
bp->pc = pc;
bp->flags = flags;
#if defined(TARGET_HAS_ICE)
if (env->singlestep_enabled != enabled) {
env->singlestep_enabled = enabled;
- /* must flush all the translated code to avoid inconsistancies */
- /* XXX: only flush what is necessary */
- tb_flush(env);
+ if (kvm_enabled())
+ kvm_update_guest_debug(env, 0);
+ else {
+            /* must flush all the translated code to avoid inconsistencies */
+ /* XXX: only flush what is necessary */
+ tb_flush(env);
+ }
}
#endif
}
cpu_set_log(loglevel);
}
-/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
+static void cpu_unlink_tb(CPUState *env)
{
-#if !defined(USE_NPTL)
+#if defined(USE_NPTL)
+ /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
+ problem and hope the cpu will stop of its own accord. For userspace
+ emulation this often isn't actually as bad as it sounds. Often
+ signals are used primarily to interrupt blocking syscalls. */
+#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+
+ tb = env->current_tb;
+    /* if the cpu is currently executing code, we must unlink it and
+       all the potentially executing TBs */
+ if (tb && !testandset(&interrupt_lock)) {
+ env->current_tb = NULL;
+ tb_reset_jump_recursive(tb);
+ resetlock(&interrupt_lock);
+ }
#endif
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
int old_mask;
old_mask = env->interrupt_request;
- /* FIXME: This is probably not threadsafe. A different thread could
- be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
-#if defined(USE_NPTL)
- /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
- problem and hope the cpu will stop of its own accord. For userspace
- emulation this often isn't actually as bad as it sounds. Often
- signals are used primarily to interrupt blocking syscalls. */
-#else
+
if (use_icount) {
env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
- /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
- an async event happened and we need to process it. */
if (!can_do_io(env)
- && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
+ && (mask & ~old_mask) != 0) {
cpu_abort(env, "Raised interrupt while not in I/O function");
}
#endif
} else {
- tb = env->current_tb;
- /* if the cpu is currently executing code, we must unlink it and
- all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
- env->current_tb = NULL;
- tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
- }
+ cpu_unlink_tb(env);
}
-#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
env->interrupt_request &= ~mask;
}
+void cpu_exit(CPUState *env)
+{
+ env->exit_request = 1;
+ cpu_unlink_tb(env);
+}
+
const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
#ifdef TARGET_I386
{ CPU_LOG_PCALL, "pcall",
"show protected mode far calls/returns/exceptions" },
+ { CPU_LOG_RESET, "cpu_reset",
+ "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
{ CPU_LOG_IOPORT, "ioport",
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
+ if (phys_offset == IO_MEM_UNASSIGNED) {
+ region_offset = start_addr;
+ }
region_offset &= TARGET_PAGE_MASK;
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, IO_MEM_UNASSIGNED,
- 0);
+ addr & TARGET_PAGE_MASK);
subpage_register(subpage, start_addr2, end_addr2,
phys_offset, region_offset);
p->region_offset = 0;
int subpage_memory;
mmio = qemu_mallocz(sizeof(subpage_t));
- if (mmio != NULL) {
- mmio->base = base;
- subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
+
+ mmio->base = base;
+ subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
- printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
- mmio, base, TARGET_PAGE_SIZE, subpage_memory);
+ printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
+ mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
- *phys = subpage_memory | IO_MEM_SUBPAGE;
- subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
+ *phys = subpage_memory | IO_MEM_SUBPAGE;
+ subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
region_offset);
- }
return mmio;
}
+static int get_free_io_mem_idx(void)
+{
+ int i;
+
+    for (i = 0; i < IO_MEM_NB_ENTRIES; i++) {
+        if (!io_mem_used[i]) {
+            io_mem_used[i] = 1;
+            return i;
+        }
+    }
+
+ return -1;
+}
+
static void io_mem_init(void)
{
+ int i;
+
cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
- io_mem_nb = 5;
+    for (i = 0; i < 5; i++)
+        io_mem_used[i] = 1;
io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
int i, subwidth = 0;
if (io_index <= 0) {
- if (io_mem_nb >= IO_MEM_NB_ENTRIES)
- return -1;
- io_index = io_mem_nb++;
+ io_index = get_free_io_mem_idx();
+ if (io_index == -1)
+ return io_index;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
return (io_index << IO_MEM_SHIFT) | subwidth;
}
+void cpu_unregister_io_memory(int io_table_address)
+{
+ int i;
+ int io_index = io_table_address >> IO_MEM_SHIFT;
+
+    for (i = 0; i < 3; i++) {
+ io_mem_read[io_index][i] = unassigned_mem_read[i];
+ io_mem_write[io_index][i] = unassigned_mem_write[i];
+ }
+ io_mem_opaque[io_index] = NULL;
+ io_mem_used[io_index] = 0;
+}
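
/* Illustrative sketch, not part of this patch: with get_free_io_mem_idx()
 * recycling table slots, a registration can now be paired with
 * cpu_unregister_io_memory() when a device goes away. MyDevState and the
 * mydev_* names are hypothetical; only the cpu_register_io_memory() /
 * cpu_unregister_io_memory() calls are the real API used here. */
typedef struct MyDevState {
    int io_table_address;   /* value returned by cpu_register_io_memory() */
    uint32_t reg;
} MyDevState;

static uint32_t mydev_io_read(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg;
}

static void mydev_io_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg = val;
}

/* using the same handler for 1, 2 and 4 byte accesses keeps the sketch short */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_io_read, mydev_io_read, mydev_io_read,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_io_write, mydev_io_write, mydev_io_write,
};

static void mydev_init(MyDevState *s)
{
    /* io_index 0 asks get_free_io_mem_idx() to pick a free slot */
    s->io_table_address = cpu_register_io_memory(0, mydev_read, mydev_write, s);
}

static void mydev_exit(MyDevState *s)
{
    /* hands the slot back so a later registration can reuse it */
    cpu_unregister_io_memory(s->io_table_address);
}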
+
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
return io_mem_write[io_index >> IO_MEM_SHIFT];
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ target_phys_addr_t addr1 = addr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
- if (l >= 4 && ((addr & 3) == 0)) {
+ if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
+ } else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit write access */
val = lduw_p(buf);
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
l = 2;
} else {
/* 8 bit write access */
val = ldub_p(buf);
- io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
l = 1;
}
} else {
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
!(pd & IO_MEM_ROMD)) {
+ target_phys_addr_t addr1 = addr;
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
- if (l >= 4 && ((addr & 3) == 0)) {
+ addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
stl_p(buf, val);
l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
+ } else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit read access */
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
stw_p(buf, val);
l = 2;
} else {
/* 8 bit read access */
- val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
stb_p(buf, val);
l = 1;
}
}
}
+typedef struct {
+ void *buffer;
+ target_phys_addr_t addr;
+ target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+typedef struct MapClient {
+ void *opaque;
+ void (*callback)(void *opaque);
+ LIST_ENTRY(MapClient) link;
+} MapClient;
+
+static LIST_HEAD(map_client_list, MapClient) map_client_list
+ = LIST_HEAD_INITIALIZER(map_client_list);
+
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+{
+ MapClient *client = qemu_malloc(sizeof(*client));
+
+ client->opaque = opaque;
+ client->callback = callback;
+ LIST_INSERT_HEAD(&map_client_list, client, link);
+ return client;
+}
+
+void cpu_unregister_map_client(void *_client)
+{
+ MapClient *client = (MapClient *)_client;
+
+ LIST_REMOVE(client, link);
+}
+
+static void cpu_notify_map_clients(void)
+{
+ MapClient *client;
+
+ while (!LIST_EMPTY(&map_client_list)) {
+ client = LIST_FIRST(&map_client_list);
+ client->callback(client->opaque);
+ LIST_REMOVE(client, link);
+ }
+}
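
/* Illustrative sketch, not part of this patch: intended use of the
 * map-client hooks above. A caller whose cpu_physical_memory_map()
 * attempt came back short (for example because the single bounce buffer
 * was busy) registers a callback and retries once cpu_notify_map_clients()
 * fires from the unmap path. retry_deferred_transfer() and
 * pending_map_client are hypothetical names. */
static void *pending_map_client;

static void retry_deferred_transfer(void *opaque)
{
    /* cpu_notify_map_clients() drops the client from the list after
       calling it, so it must not be unregistered again here */
    pending_map_client = NULL;
    /* ... re-issue the cpu_physical_memory_map() that failed earlier ... */
}

/* at the point where the mapping could not be obtained: */
    pending_map_client = cpu_register_map_client(NULL, retry_deferred_transfer);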
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+ target_phys_addr_t *plen,
+ int is_write)
+{
+ target_phys_addr_t len = *plen;
+ target_phys_addr_t done = 0;
+ int l;
+ uint8_t *ret = NULL;
+ uint8_t *ptr;
+ target_phys_addr_t page;
+ unsigned long pd;
+ PhysPageDesc *p;
+ unsigned long addr1;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = phys_page_find(page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ if (done || bounce.buffer) {
+ break;
+ }
+ bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+ bounce.addr = addr;
+ bounce.len = l;
+ if (!is_write) {
+ cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+ }
+ ptr = bounce.buffer;
+ } else {
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
+ }
+ if (!done) {
+ ret = ptr;
+ } else if (ret + done != ptr) {
+ break;
+ }
+
+ len -= l;
+ addr += l;
+ done += l;
+ }
+ *plen = done;
+ return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+ int is_write, target_phys_addr_t access_len)
+{
+ if (buffer != bounce.buffer) {
+ if (is_write) {
+ unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
+ while (access_len) {
+ unsigned l;
+ l = TARGET_PAGE_SIZE;
+ if (l > access_len)
+ l = access_len;
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ addr1 += l;
+ access_len -= l;
+ }
+ }
+ return;
+ }
+ if (is_write) {
+ cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+ }
+ qemu_free(bounce.buffer);
+ bounce.buffer = NULL;
+ cpu_notify_map_clients();
+}
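
/* Illustrative sketch, not part of this patch: a zero-copy DMA write into
 * guest memory using the map/unmap pair above (assumes the usual exec.c
 * includes). mydev_dma_write() and its arguments are hypothetical; a real
 * caller also has to cope with a short mapping (plen < size) and with NULL,
 * typically by falling back to cpu_physical_memory_rw() or deferring via
 * cpu_register_map_client(). */
static void mydev_dma_write(target_phys_addr_t dma_addr,
                            const uint8_t *data, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        return;             /* no mapping available right now */
    }
    memcpy(host, data, plen);
    /* access_len == plen marks exactly the written pages dirty and lets
       unmap invalidate any translated code that covered them */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}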
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
#endif
-/* virtual memory access for debug */
+/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)
{
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
- cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
- buf, l, is_write);
+ phys_addr += (addr & ~TARGET_PAGE_MASK);
+#if !defined(CONFIG_USER_ONLY)
+ if (is_write)
+ cpu_physical_memory_write_rom(phys_addr, buf, l);
+ else
+#endif
+ cpu_physical_memory_rw(phys_addr, buf, l, is_write);
len -= l;
buf += l;
addr += l;