2 * On-chip DMA controller framework.
4 * Copyright (C) 2008 Nokia Corporation
5 * Written by Andrzej Zaborowski <andrew@openedhand.com>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 or
10 * (at your option) version 3 of the License.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 #include "qemu-common.h"
22 #include "qemu-timer.h"
26 static void transfer_mem2mem(struct soc_dma_ch_s *ch)
28 memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
29 ch->paddr[0] += ch->bytes;
30 ch->paddr[1] += ch->bytes;
33 static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
35 ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
36 ch->paddr[0] += ch->bytes;
39 static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
41 ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
42 ch->paddr[1] += ch->bytes;
45 /* This is further optimisable but isn't very important because often
46 * DMA peripherals forbid this kind of transfers and even when they don't,
47 * operating systems may not need to use them. */
48 static void *fifo_buf;
50 static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
52 if (ch->bytes > fifo_size)
53 fifo_buf = qemu_realloc(fifo_buf, fifo_size = ch->bytes);
55 /* Implement as transfer_fifo2linear + transfer_linear2fifo. */
56 ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
57 ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
/* NOTE(review): garbled listing of the private controller state -- the
 * opening `struct dma_s {` and several members (at minimum the channel
 * count used by the savevm code, channel_freq, enabled_count, the
 * mem/fifo union and the memmap pointer/size) were dropped by
 * extraction.  Confirm against the original file before editing. */
/* Bitmask of currently enabled channels, maintained by
 * soc_dma_set_request(). */
63 uint64_t ch_enable_mask;
/* One entry of the DMA-visible address map; apparently kept sorted by
 * addr (soc_dma_port_add_* memmove entries on insert) and searched by
 * soc_dma_lookup(). */
67 struct memmap_entry_s {
68 enum soc_dma_port_type type;
69 target_phys_addr_t addr;
/* Trailing variable-length array (pre-C99 `[0]` idiom): per-channel
 * state allocated inline after the controller struct -- see
 * soc_dma_init()'s `sizeof(*s) + n * sizeof(*s->ch)` allocation. */
84 struct soc_dma_ch_s ch[0];
87 static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
89 int64_t now = qemu_get_clock(vm_clock);
90 struct dma_s *dma = (struct dma_s *) ch->dma;
92 qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
/* Timer callback: execute one burst on a channel.
 * NOTE(review): this listing is incomplete -- braces and several body
 * lines (presumably including the ch->transfer_fn invocation and the
 * ch->running bookkeeping referenced elsewhere) were dropped by
 * extraction; restore from the original before building. */
95 static void soc_dma_ch_run(void *opaque)
97 struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
/* Let the SoC-specific controller refresh per-channel state before the
 * transfer -- presumably addresses/byte count; confirm against the
 * setup_fn contract in the header. */
100 ch->dma->setup_fn(ch);
/* Re-arm the timer so the next burst is spaced by the time this one
 * would take on real hardware. */
105 soc_dma_ch_schedule(ch, ch->bytes);
/* Find the dma->memmap entry covering (or nearest at-or-below) `addr`.
 * NOTE(review): the body of what looks like a binary search over the
 * sorted memmap array was dropped from this listing -- only the probe
 * `lo[hi].addr <= addr` survives; restore the loop and the return from
 * the original file. */
109 static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
110 target_phys_addr_t addr)
112 struct memmap_entry_s *lo;
116 hi = dma->memmap_size;
120 if (lo[hi].addr <= addr)
127 static inline enum soc_dma_port_type soc_dma_ch_update_type(
128 struct soc_dma_ch_s *ch, int port)
130 struct dma_s *dma = (struct dma_s *) ch->dma;
131 struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
133 if (entry->type == soc_dma_port_fifo) {
134 while (entry < dma->memmap + dma->memmap_size &&
135 entry->u.fifo.out != port)
137 if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
138 return soc_dma_port_other;
140 if (ch->type[port] != soc_dma_access_const)
141 return soc_dma_port_other;
143 ch->io_fn[port] = entry->u.fifo.fn;
144 ch->io_opaque[port] = entry->u.fifo.opaque;
145 return soc_dma_port_fifo;
146 } else if (entry->type == soc_dma_port_mem) {
147 if (entry->addr > ch->vaddr[port] ||
148 entry->addr + entry->u.mem.size <= ch->vaddr[port])
149 return soc_dma_port_other;
151 /* TODO: support constant memory address for source port as used for
152 * drawing solid rectangles by PalmOS(R). */
153 if (ch->type[port] != soc_dma_access_const)
154 return soc_dma_port_other;
156 ch->paddr[port] = (uint8_t *) entry->u.mem.base +
157 (ch->vaddr[port] - entry->addr);
158 /* TODO: save bytes left to the end of the mapping somewhere so we
159 * can check we're not reading beyond it. */
160 return soc_dma_port_mem;
162 return soc_dma_port_other;
165 void soc_dma_ch_update(struct soc_dma_ch_s *ch)
167 enum soc_dma_port_type src, dst;
169 src = soc_dma_ch_update_type(ch, 0);
170 if (src == soc_dma_port_other) {
172 ch->transfer_fn = ch->dma->transfer_fn;
175 dst = soc_dma_ch_update_type(ch, 1);
177 /* TODO: use src and dst as array indices. */
178 if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
179 ch->transfer_fn = transfer_mem2mem;
180 else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
181 ch->transfer_fn = transfer_mem2fifo;
182 else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
183 ch->transfer_fn = transfer_fifo2mem;
184 else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
185 ch->transfer_fn = transfer_fifo2fifo;
187 ch->transfer_fn = ch->dma->transfer_fn;
189 ch->update = (dst != soc_dma_port_other);
192 static void soc_dma_ch_freq_update(struct dma_s *s)
194 if (s->enabled_count)
195 /* We completely ignore channel priorities and stuff */
196 s->channel_freq = s->soc.freq / s->enabled_count;
198 /* TODO: Signal that we want to disable the functional clock and let
199 * the platform code decide what to do with it, i.e. check that
200 * auto-idle is enabled in the clock controller and if we are stopping
201 * the clock, do the same with any parent clocks that had only one
202 * user keeping them on and auto-idle enabled. */;
205 void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
207 struct dma_s *dma = (struct dma_s *) ch->dma;
209 dma->enabled_count += level - ch->enable;
212 dma->ch_enable_mask |= 1 << ch->num;
214 dma->ch_enable_mask &= ~(1 << ch->num);
216 if (level != ch->enable) {
217 soc_dma_ch_freq_update(dma);
221 qemu_del_timer(ch->timer);
222 else if (!ch->running)
225 soc_dma_ch_schedule(ch, 1);
229 void soc_dma_reset(struct soc_dma_s *soc)
231 struct dma_s *s = (struct dma_s *) soc;
233 memset(s->soc.drqst, 0, sizeof(s->soc.drqst));
234 s->ch_enable_mask = 0;
235 s->enabled_count = 0;
236 soc_dma_ch_freq_update(s);
239 static void soc_dma_save_state(QEMUFile *f, void *opaque)
241 struct dma_s *s = (struct dma_s *)opaque;
244 qemu_put_buffer(f, s->soc.drqst, sizeof(s->soc.drqst));
245 qemu_put_sbe64(f, s->soc.freq);
246 qemu_put_be64(f, s->ch_enable_mask);
247 qemu_put_sbe64(f, s->channel_freq);
248 qemu_put_sbe32(f, s->enabled_count);
249 for (i = 0; i < s->chnum; i++) {
250 qemu_put_timer(f, s->ch[i].timer);
251 qemu_put_sbe32(f, s->ch[i].enable);
252 qemu_put_sbe32(f, s->ch[i].update);
253 qemu_put_sbe32(f, s->ch[i].bytes);
254 qemu_put_sbe32(f, s->ch[i].type[0]);
255 qemu_put_sbe32(f, s->ch[i].type[1]);
256 #if TARGET_PHYS_ADDR_BITS == 32
257 qemu_put_be32(f, s->ch[i].vaddr[0]);
258 qemu_put_be32(f, s->ch[i].vaddr[1]);
259 #elif TARGET_PHYS_ADDR_BITS == 64
260 qemu_put_be64(f, s->ch[i].vaddr[0]);
261 qemu_put_be64(f, s->ch[i].vaddr[1]);
263 #error TARGET_PHYS_ADDR_BITS undefined
265 qemu_put_sbe32(f, s->ch[i].running);
/* loadvm callback: deserialize in the exact field order written by
 * soc_dma_save_state(), then re-resolve each channel's derived state.
 * NOTE(review): this listing is incomplete -- braces, the loop index
 * declaration, original lines 272-276 (possibly a version_id check),
 * the `#else`/`#endif` of the address-size conditional and the final
 * `return` were dropped by extraction; restore from the original. */
269 static int soc_dma_load_state(QEMUFile *f, void *opaque, int version_id)
271 struct dma_s *s = (struct dma_s *)opaque;
277 qemu_get_buffer(f, s->soc.drqst, sizeof(s->soc.drqst));
278 s->soc.freq = qemu_get_sbe64(f);
279 s->ch_enable_mask = qemu_get_be64(f);
280 s->channel_freq = qemu_get_sbe64(f);
281 s->enabled_count = qemu_get_sbe32(f);
282 for (i = 0; i < s->chnum; i++) {
283 qemu_get_timer(f, s->ch[i].timer);
284 s->ch[i].enable = qemu_get_sbe32(f);
285 s->ch[i].update = qemu_get_sbe32(f);
286 s->ch[i].bytes = qemu_get_sbe32(f);
287 s->ch[i].type[0] = qemu_get_sbe32(f);
288 s->ch[i].type[1] = qemu_get_sbe32(f);
289 #if TARGET_PHYS_ADDR_BITS == 32
290 s->ch[i].vaddr[0] = qemu_get_be32(f);
291 s->ch[i].vaddr[1] = qemu_get_be32(f);
292 #elif TARGET_PHYS_ADDR_BITS == 64
293 s->ch[i].vaddr[0] = qemu_get_be64(f);
294 s->ch[i].vaddr[1] = qemu_get_be64(f);
296 #error TARGET_PHYS_ADDR_BITS undefined
298 s->ch[i].running = qemu_get_sbe32(f);
/* Recompute transfer_fn and cached pointers/callbacks from the restored
 * vaddr/type fields instead of restoring them from the stream. */
300 soc_dma_ch_update(&s->ch[i]);
306 /* TODO: take a functional-clock argument */
/* Allocate and initialize a DMA controller with `n` channels; the
 * zero-filled per-channel state lives inline after the controller
 * struct (the trailing ch[0] array).  Each channel gets a vm_clock
 * timer driving soc_dma_ch_run(), the controller is reset and savevm
 * handlers are registered.
 * NOTE(review): listing is incomplete -- braces, the loop index
 * declaration, original lines 311-313/316 (likely the channel-count and
 * per-channel number assignments the savevm code and set_request rely
 * on) and the final `return` of the public struct soc_dma_s pointer
 * were dropped by extraction; restore from the original. */
307 struct soc_dma_s *soc_dma_init(int n)
310 struct dma_s *s = qemu_mallocz(sizeof(*s) + n * sizeof(*s->ch));
314 for (i = 0; i < n; i ++) {
/* Back-pointer from each channel to the shared controller state. */
315 s->ch[i].dma = &s->soc;
317 s->ch[i].timer = qemu_new_timer(vm_clock, soc_dma_ch_run, &s->ch[i]);
320 soc_dma_reset(&s->soc);
323 register_savevm("soc_dma", -1, 0,
324 soc_dma_save_state, soc_dma_load_state, s);
/* Register a FIFO endpoint at DMA-visible address `virt_base` with I/O
 * callback `fn`; `out` selects the direction.  Grows dma->memmap,
 * checks for collisions with existing RAM regions or an identical FIFO
 * (printing a diagnostic on conflict), and inserts the new entry so the
 * map stays sorted by address for soc_dma_lookup().
 * NOTE(review): listing is incomplete -- the error-path statements
 * after each fprintf, the `entry ++` advance steps, the braces/else of
 * the insert-position scan and the tail of the memmove argument list
 * were dropped by extraction; restore from the original before
 * building. */
328 void soc_dma_port_add_fifo(struct soc_dma_s *soc, target_phys_addr_t virt_base,
329 soc_dma_io_t fn, void *opaque, int out)
331 struct memmap_entry_s *entry;
332 struct dma_s *dma = (struct dma_s *) soc;
/* Make room for one more entry before locating the insert position. */
334 dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
335 (dma->memmap_size + 1))
336 entry = soc_dma_lookup(dma, virt_base);
338 if (dma->memmap_size) {
339 if (entry->type == soc_dma_port_mem) {
/* New FIFO must not fall inside an existing RAM mapping. */
340 if (entry->addr <= virt_base &&
341 entry->addr + entry->u.mem.size > virt_base) {
342 fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
343 " collides with RAM region at " TARGET_FMT_lx
344 "-" TARGET_FMT_lx "\n", __FUNCTION__,
345 (target_ulong) virt_base,
346 (target_ulong) entry->addr, (target_ulong)
347 (entry->addr + entry->u.mem.size));
351 if (entry->addr <= virt_base)
/* Scan past FIFOs at lower-or-equal addresses, rejecting an exact
 * duplicate (same address, same direction). */
354 while (entry < dma->memmap + dma->memmap_size &&
355 entry->addr <= virt_base) {
356 if (entry->addr == virt_base && entry->u.fifo.out == out) {
357 fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
358 " collides FIFO at " TARGET_FMT_lx "\n",
359 __FUNCTION__, (target_ulong) virt_base,
360 (target_ulong) entry->addr);
/* Shift later entries up to open a slot at the insert position. */
367 memmove(entry + 1, entry,
368 (uint8_t *) (dma->memmap + dma->memmap_size ++) -
373 entry->addr = virt_base;
374 entry->type = soc_dma_port_fifo;
375 entry->u.fifo.fn = fn;
376 entry->u.fifo.opaque = opaque;
377 entry->u.fifo.out = out;
/* Register a linear RAM region: DMA-visible range [virt_base,
 * virt_base + size) backed by host memory at `phys_base`.  Grows
 * dma->memmap, checks for overlap with existing RAM regions or FIFOs
 * (printing a diagnostic on conflict) and inserts the entry keeping the
 * map sorted by address for soc_dma_lookup().
 * NOTE(review): listing is incomplete -- the error-path statements
 * after each fprintf, the closing of the format string at original line
 * 412, the braces/else of the scan, the loop-body advance and the tail
 * of the memmove argument list were dropped by extraction; restore from
 * the original before building. */
380 void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
381 target_phys_addr_t virt_base, size_t size)
383 struct memmap_entry_s *entry;
384 struct dma_s *dma = (struct dma_s *) soc;
/* Make room for one more entry before locating the insert position. */
386 dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
387 (dma->memmap_size + 1));
388 entry = soc_dma_lookup(dma, virt_base);
390 if (dma->memmap_size) {
391 if (entry->type == soc_dma_port_mem) {
/* Reject any overlap between the new range and the found RAM entry. */
392 if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
393 (entry->addr <= virt_base &&
394 entry->addr + entry->u.mem.size > virt_base)) {
395 fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
396 " collides with RAM region at " TARGET_FMT_lx
397 "-" TARGET_FMT_lx "\n", __FUNCTION__,
398 (target_ulong) virt_base,
399 (target_ulong) (virt_base + size),
400 (target_ulong) entry->addr, (target_ulong)
401 (entry->addr + entry->u.mem.size));
405 if (entry->addr <= virt_base)
/* Reject a FIFO falling inside the new RAM range. */
408 if (entry->addr >= virt_base &&
409 entry->addr < virt_base + size) {
410 fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
411 " collides with FIFO at " TARGET_FMT_lx
413 (target_ulong) virt_base,
414 (target_ulong) (virt_base + size),
415 (target_ulong) entry->addr);
/* Advance to the first entry above the new base address. */
419 while (entry < dma->memmap + dma->memmap_size &&
420 entry->addr <= virt_base)
/* Shift later entries up to open a slot at the insert position. */
424 memmove(entry + 1, entry,
425 (uint8_t *) (dma->memmap + dma->memmap_size ++) -
430 entry->addr = virt_base;
431 entry->type = soc_dma_port_mem;
432 entry->u.mem.base = phys_base;
433 entry->u.mem.size = size;
436 /* TODO: port removal for ports like PCMCIA memory */