/*
 * QEMU ETRAX DMA Controller.
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <sys/time.h>
#include "hw.h"
#include "qemu-common.h"
#include "sysemu.h"

#include "etraxfs_dma.h"
#define D(x)

#define RW_DATA           0x0
#define RW_SAVED_DATA     0x58
#define RW_SAVED_DATA_BUF 0x5c
#define RW_GROUP          0x60
#define RW_GROUP_DOWN     0x7c
#define RW_CMD            0x80
#define RW_CFG            0x84
#define RW_STAT           0x88
#define RW_INTR_MASK      0x8c
#define RW_ACK_INTR       0x90
#define R_INTR            0x94
#define R_MASKED_INTR     0x98
#define RW_STREAM_CMD     0x9c

#define DMA_REG_MAX       0x100
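
/* The descriptor layouts below mirror the ETRAX FS hardware DMA
   descriptors (the same structures the Linux CRIS port uses), so they
   can be read from and written back to guest memory verbatim.  */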
// ------------------------------------------------------------ dma_descr_group
typedef struct dma_descr_group {
        struct dma_descr_group *next;
        unsigned eol : 1, tol : 1, bol : 1, : 1, intr : 1, : 2, en : 1;
        unsigned : 7, dis : 1, md : 16;
        struct dma_descr_group *up;
        union {
                struct dma_descr_context *context;
                struct dma_descr_group *group;
        } down;
} dma_descr_group;

// ---------------------------------------------------------- dma_descr_context
typedef struct dma_descr_context {
        struct dma_descr_context *next;
        unsigned eol : 1, : 3, intr : 1, : 1, store_mode : 1, en : 1;
        unsigned : 7, dis : 1, md0 : 16;
        unsigned md1, md2, md3, md4;
        struct dma_descr_data *saved_data;
        char *saved_data_buf;
} dma_descr_context;

// ------------------------------------------------------------- dma_descr_data
typedef struct dma_descr_data {
        struct dma_descr_data *next;
        char *buf;
        unsigned eol : 1, : 2, out_eop : 1, intr : 1, wait : 1, : 2;
        unsigned : 3, in_eop : 1, : 4, md : 16;
        char *after;
} dma_descr_data;
/* Constants */
enum {
        regk_dma_ack_pkt               = 0x00000100,
        regk_dma_anytime               = 0x00000001,
        regk_dma_array                 = 0x00000008,
        regk_dma_burst                 = 0x00000020,
        regk_dma_client                = 0x00000002,
        regk_dma_copy_next             = 0x00000010,
        regk_dma_copy_up               = 0x00000020,
        regk_dma_data_at_eol           = 0x00000001,
        regk_dma_dis_c                 = 0x00000010,
        regk_dma_dis_g                 = 0x00000020,
        regk_dma_idle                  = 0x00000001,
        regk_dma_intern                = 0x00000004,
        regk_dma_load_c                = 0x00000200,
        regk_dma_load_c_n              = 0x00000280,
        regk_dma_load_c_next           = 0x00000240,
        regk_dma_load_d                = 0x00000140,
        regk_dma_load_g                = 0x00000300,
        regk_dma_load_g_down           = 0x000003c0,
        regk_dma_load_g_next           = 0x00000340,
        regk_dma_load_g_up             = 0x00000380,
        regk_dma_next_en               = 0x00000010,
        regk_dma_next_pkt              = 0x00000010,
        regk_dma_no                    = 0x00000000,
        regk_dma_only_at_wait          = 0x00000000,
        regk_dma_restore               = 0x00000020,
        regk_dma_rst                   = 0x00000001,
        regk_dma_running               = 0x00000004,
        regk_dma_rw_cfg_default        = 0x00000000,
        regk_dma_rw_cmd_default        = 0x00000000,
        regk_dma_rw_intr_mask_default  = 0x00000000,
        regk_dma_rw_stat_default       = 0x00000101,
        regk_dma_rw_stream_cmd_default = 0x00000000,
        regk_dma_save_down             = 0x00000020,
        regk_dma_save_up               = 0x00000020,
        regk_dma_set_reg               = 0x00000050,
        regk_dma_set_w_size1           = 0x00000190,
        regk_dma_set_w_size2           = 0x000001a0,
        regk_dma_set_w_size4           = 0x000001c0,
        regk_dma_stopped               = 0x00000002,
        regk_dma_store_c               = 0x00000002,
        regk_dma_store_descr           = 0x00000000,
        regk_dma_store_g               = 0x00000004,
        regk_dma_store_md              = 0x00000001,
        regk_dma_sw                    = 0x00000008,
        regk_dma_update_down           = 0x00000020,
        regk_dma_yes                   = 0x00000001
};
enum dma_ch_state
{
        RST = 1,
        STOPPED = 2,
        RUNNING = 4
};

struct fs_dma_channel
{
        int regmap;
        qemu_irq *irq;
        struct etraxfs_dma_client *client;

        /* Internal status.  */
        int stream_cmd_src;
        enum dma_ch_state state;
        unsigned int input : 1;
        unsigned int eol : 1;

        struct dma_descr_group current_g;
        struct dma_descr_context current_c;
        struct dma_descr_data current_d;

        /* Control registers.  */
        uint32_t regs[DMA_REG_MAX];
};

struct fs_dma_ctrl
{
        CPUState *env;
        target_phys_addr_t base;

        int nr_channels;
        struct fs_dma_channel *channels;

        QEMUBH *bh;
};
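
/* Helpers for the per-channel register file.  Each channel's registers
   live in its own regs[] array, indexed by byte offset.  */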
static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
        return ctrl->channels[c].regs[reg];
}

static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
        return channel_reg(ctrl, c, RW_CFG) & 2;
}

static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
        return (channel_reg(ctrl, c, RW_CFG) & 1)
                && ctrl->channels[c].client;
}

static inline int fs_channel(target_phys_addr_t base, target_phys_addr_t addr)
{
        /* Every channel has a 0x2000 ctrl register map.  */
        return (addr - base) >> 13;
}
#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
        target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP);

        /* Load and decode. FIXME: handle endianness.  */
        cpu_physical_memory_read (addr,
                                  (void *) &ctrl->channels[c].current_g,
                                  sizeof ctrl->channels[c].current_g);
}

static void dump_c(int ch, struct dma_descr_context *c)
{
        printf("%s ch=%d\n", __func__, ch);
        printf("next=%p\n", c->next);
        printf("saved_data=%p\n", c->saved_data);
        printf("saved_data_buf=%p\n", c->saved_data_buf);
        printf("eol=%x\n", (uint32_t) c->eol);
}
#endif

static void dump_d(int ch, struct dma_descr_data *d)
{
        printf("%s ch=%d\n", __func__, ch);
        printf("next=%p\n", d->next);
        printf("buf=%p\n", d->buf);
        printf("after=%p\n", d->after);
        printf("intr=%x\n", (uint32_t) d->intr);
        printf("out_eop=%x\n", (uint32_t) d->out_eop);
        printf("in_eop=%x\n", (uint32_t) d->in_eop);
        printf("eol=%x\n", (uint32_t) d->eol);
}
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
        target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

        /* Load and decode. FIXME: handle endianness.  */
        cpu_physical_memory_read (addr,
                                  (void *) &ctrl->channels[c].current_c,
                                  sizeof ctrl->channels[c].current_c);

        D(dump_c(c, &ctrl->channels[c].current_c));
        /* I guess this should update the current pos.  */
        ctrl->channels[c].regs[RW_SAVED_DATA] =
                (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
        target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

        /* Load and decode. FIXME: handle endianness.  */
        D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
        cpu_physical_memory_read (addr,
                                  (void *) &ctrl->channels[c].current_d,
                                  sizeof ctrl->channels[c].current_d);

        D(dump_d(c, &ctrl->channels[c].current_d));
        ctrl->channels[c].regs[RW_DATA] = addr;
}
static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
        target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

        /* Encode and store. FIXME: handle endianness.  */
        D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
        D(dump_d(c, &ctrl->channels[c].current_d));
        cpu_physical_memory_write (addr,
                                   (void *) &ctrl->channels[c].current_c,
                                   sizeof ctrl->channels[c].current_c);
}
static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
        target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

        /* Encode and store. FIXME: handle endianness.  */
        D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
        cpu_physical_memory_write (addr,
                                   (void *) &ctrl->channels[c].current_d,
                                   sizeof ctrl->channels[c].current_d);
}
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
        /* rw_cfg->en = 0 */
}
static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
        if (ctrl->channels[c].client)
        {
                ctrl->channels[c].eol = 0;
                ctrl->channels[c].state = RUNNING;
        } else
                printf("WARNING: starting DMA ch %d with no client\n", c);
}
static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
        if (!channel_en(ctrl, c)
            || channel_stopped(ctrl, c)
            || ctrl->channels[c].state != RUNNING
            /* Only reload the current data descriptor if it has eol set.  */
            || !ctrl->channels[c].current_d.eol) {
                D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
                         c, ctrl->channels[c].state,
                         channel_stopped(ctrl, c),
                         channel_en(ctrl, c),
                         ctrl->channels[c].eol));
                D(dump_d(c, &ctrl->channels[c].current_d));
                return;
        }

        /* Reload the current descriptor.  */
        channel_load_d(ctrl, c);

        /* If the current descriptor cleared the eol flag and we had already
           reached eol state, do the continue.  */
        if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
                D(printf("continue %d ok %p\n", c,
                         ctrl->channels[c].current_d.next));
                ctrl->channels[c].regs[RW_SAVED_DATA] =
                        (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
                channel_load_d(ctrl, c);
                channel_start(ctrl, c);
        }
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}
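
/* Decode a write to RW_STREAM_CMD: the low bits select which descriptor
   level (data/context) to load and whether to start the channel.  */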
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
        unsigned int cmd = v & ((1 << 10) - 1);

        D(printf("%s ch=%d cmd=%x\n",
                 __func__, c, cmd));
        if (cmd & regk_dma_load_d) {
                channel_load_d(ctrl, c);
                if (cmd & regk_dma_burst)
                        channel_start(ctrl, c);
        }

        if (cmd & regk_dma_load_c) {
                channel_load_c(ctrl, c);
                channel_start(ctrl, c);
        }
}
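
/* Recompute R_INTR/R_MASKED_INTR from the ack and mask registers and
   drive the channel's IRQ line accordingly.  */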
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
        D(printf("%s %d\n", __func__, c));
        ctrl->channels[c].regs[R_INTR] &=
                ~(ctrl->channels[c].regs[RW_ACK_INTR]);

        ctrl->channels[c].regs[R_MASKED_INTR] =
                ctrl->channels[c].regs[R_INTR]
                & ctrl->channels[c].regs[RW_INTR_MASK];

        D(printf("%s: chan=%d masked_intr=%x\n", __func__,
                 c,
                 ctrl->channels[c].regs[R_MASKED_INTR]));

        if (ctrl->channels[c].regs[R_MASKED_INTR])
                qemu_irq_raise(ctrl->channels[c].irq[0]);
        else
                qemu_irq_lower(ctrl->channels[c].irq[0]);
}
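
/* Output pump: walk the data descriptor chain, pushing each buffer to
   the attached client until the end-of-list descriptor is reached.  */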
static void channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
        uint32_t len;
        uint32_t saved_data_buf;
        unsigned char buf[2 * 1024];

        while (ctrl->channels[c].eol != 1) {
                saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);

                D(printf("ch=%d buf=%x after=%x saved_data_buf=%x\n",
                         c,
                         (uint32_t)ctrl->channels[c].current_d.buf,
                         (uint32_t)ctrl->channels[c].current_d.after,
                         saved_data_buf));

                len = (uint32_t)ctrl->channels[c].current_d.after;
                len -= saved_data_buf;

                if (len > sizeof buf)
                        len = sizeof buf;
                cpu_physical_memory_read (saved_data_buf, buf, len);

                D(printf("channel %d pushes %x %u bytes\n", c,
                         saved_data_buf, len));

                if (ctrl->channels[c].client->client.push)
                        ctrl->channels[c].client->client.push(
                                ctrl->channels[c].client->client.opaque,
                                buf, len);
                else
                        printf("WARNING: DMA ch%d dataloss,"
                               " no attached client.\n", c);

                saved_data_buf += len;

                if (saved_data_buf ==
                    (uint32_t)ctrl->channels[c].current_d.after) {
                        /* Done. Step to next.  */
                        if (ctrl->channels[c].current_d.out_eop) {
                                /* TODO: signal eop to the client.  */
                                D(printf("signal eop\n"));
                        }
                        if (ctrl->channels[c].current_d.intr) {
                                /* TODO: signal intr to the client.  */
                                D(printf("signal intr\n"));
                                ctrl->channels[c].regs[R_INTR] |= (1 << 2);
                                channel_update_irq(ctrl, c);
                        }
                        if (ctrl->channels[c].current_d.eol) {
                                D(printf("channel %d EOL\n", c));
                                ctrl->channels[c].eol = 1;

                                /* Mark the context as disabled.  */
                                ctrl->channels[c].current_c.dis = 1;
                                channel_store_c(ctrl, c);

                                channel_stop(ctrl, c);
                        } else {
                                ctrl->channels[c].regs[RW_SAVED_DATA] =
                                        (uint32_t)(unsigned long)
                                        ctrl->channels[c].current_d.next;
                                /* Load new descriptor.  */
                                channel_load_d(ctrl, c);
                                saved_data_buf = (uint32_t)(unsigned long)
                                        ctrl->channels[c].current_d.buf;
                        }

                        channel_store_d(ctrl, c);
                        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                                saved_data_buf;
                        D(dump_d(c, &ctrl->channels[c].current_d));
                }
                ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
        }
}
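
/* Input path: copy a buffer supplied by the client into the channel's
   current data descriptor, stepping to the next descriptor and raising
   interrupts as buffers fill or an end-of-packet is signalled.  Returns
   the number of bytes consumed.  */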
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
                              unsigned char *buf, int buflen, int eop)
{
        uint32_t len;
        uint32_t saved_data_buf;

        if (ctrl->channels[c].eol == 1)
                return 0;

        saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
        len = (uint32_t)ctrl->channels[c].current_d.after;
        len -= saved_data_buf;

        if (len > buflen)
                len = buflen;

        cpu_physical_memory_write (saved_data_buf, buf, len);
        saved_data_buf += len;

        if (saved_data_buf ==
            (uint32_t)ctrl->channels[c].current_d.after
            || eop) {
                uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

                D(printf("in dscr end len=%d\n",
                         ctrl->channels[c].current_d.after
                         - ctrl->channels[c].current_d.buf));
                ctrl->channels[c].current_d.after =
                        (void *)(unsigned long) saved_data_buf;

                /* Done. Step to next.  */
                if (ctrl->channels[c].current_d.intr) {
                        /* TODO: signal intr to the client.  */
                        ctrl->channels[c].regs[R_INTR] |= 3;
                }
                if (eop) {
                        ctrl->channels[c].current_d.in_eop = 1;
                        ctrl->channels[c].regs[R_INTR] |= 8;
                }
                if (r_intr != ctrl->channels[c].regs[R_INTR])
                        channel_update_irq(ctrl, c);

                channel_store_d(ctrl, c);
                D(dump_d(c, &ctrl->channels[c].current_d));

                if (ctrl->channels[c].current_d.eol) {
                        D(printf("channel %d EOL\n", c));
                        ctrl->channels[c].eol = 1;

                        /* Mark the context as disabled.  */
                        ctrl->channels[c].current_c.dis = 1;
                        channel_store_c(ctrl, c);

                        channel_stop(ctrl, c);
                } else {
                        ctrl->channels[c].regs[RW_SAVED_DATA] =
                                (uint32_t)(unsigned long)
                                ctrl->channels[c].current_d.next;
                        /* Load new descriptor.  */
                        channel_load_d(ctrl, c);
                        saved_data_buf = (uint32_t)(unsigned long)
                                ctrl->channels[c].current_d.buf;
                }
        }

        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
        return len;
}
static inline void channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
        if (ctrl->channels[c].client->client.pull)
                ctrl->channels[c].client->client.pull(
                        ctrl->channels[c].client->client.opaque);
}
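
/* Only 32-bit accesses to the register map are supported; byte and
   halfword accesses abort.  */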
static uint32_t dma_rinvalid (void *opaque, target_phys_addr_t addr)
{
        struct fs_dma_ctrl *ctrl = opaque;
        CPUState *env = ctrl->env;
        cpu_abort(env, "Unsupported short access. reg=" TARGET_FMT_plx "\n",
                  addr);
        return 0;
}
static uint32_t
dma_readl (void *opaque, target_phys_addr_t addr)
{
        struct fs_dma_ctrl *ctrl = opaque;
        int c;
        uint32_t r = 0;

        /* Make addr relative to this instance's base.  */
        c = fs_channel(ctrl->base, addr);
        addr &= 0x1fff;
        switch (addr)
        {
        case RW_STAT:
                r = ctrl->channels[c].state & 7;
                r |= ctrl->channels[c].eol << 5;
                r |= ctrl->channels[c].stream_cmd_src << 8;
                break;

        default:
                r = ctrl->channels[c].regs[addr];
                D(printf ("%s c=%d addr=%x\n",
                          __func__, c, addr));
                break;
        }
        return r;
}
static void
dma_winvalid (void *opaque, target_phys_addr_t addr, uint32_t value)
{
        struct fs_dma_ctrl *ctrl = opaque;
        CPUState *env = ctrl->env;
        cpu_abort(env, "Unsupported short access. reg=" TARGET_FMT_plx "\n",
                  addr);
}
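
/* Derive the channel state from the stop/enable bits in RW_CFG.  */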
static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
        /* Mask both the en and stop bits; the previous "& 1" could never
           equal 3, making the guard a no-op.  */
        if ((ctrl->channels[c].regs[RW_CFG] & 3) != 3) {
                if (ctrl->channels[c].regs[RW_CFG] & 2)
                        ctrl->channels[c].state = STOPPED;
                if (!(ctrl->channels[c].regs[RW_CFG] & 1))
                        ctrl->channels[c].state = RST;
        }
}
static void
dma_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
{
        struct fs_dma_ctrl *ctrl = opaque;
        int c;

        /* Make addr relative to this instance's base.  */
        c = fs_channel(ctrl->base, addr);
        addr &= 0x1fff;
        switch (addr)
        {
        case RW_DATA:
                ctrl->channels[c].regs[addr] = value;
                break;

        case RW_CFG:
                ctrl->channels[c].regs[addr] = value;
                dma_update_state(ctrl, c);
                break;
        case RW_CMD:
                /* continue.  */
                if (value & ~1)
                        printf("Invalid store to ch=%d RW_CMD %x\n",
                               c, value);
                ctrl->channels[c].regs[addr] = value;
                channel_continue(ctrl, c);
                break;

        case RW_SAVED_DATA:
        case RW_SAVED_DATA_BUF:
        case RW_GROUP:
        case RW_GROUP_DOWN:
                ctrl->channels[c].regs[addr] = value;
                break;

        case RW_ACK_INTR:
        case RW_INTR_MASK:
                ctrl->channels[c].regs[addr] = value;
                channel_update_irq(ctrl, c);
                if (addr == RW_ACK_INTR)
                        ctrl->channels[c].regs[RW_ACK_INTR] = 0;
                break;

        case RW_STREAM_CMD:
                if (value & ~1023)
                        printf("Invalid store to ch=%d "
                               "RW_STREAMCMD %x\n",
                               c, value);
                ctrl->channels[c].regs[addr] = value;
                D(printf("stream_cmd ch=%d\n", c));
                channel_stream_cmd(ctrl, c, value);
                break;

        default:
                D(printf ("%s c=%d addr=%x value=%x\n",
                          __func__, c, addr, value));
                break;
        }
}
static CPUReadMemoryFunc *dma_read[] = {
        &dma_rinvalid,
        &dma_rinvalid,
        &dma_readl,
};

static CPUWriteMemoryFunc *dma_write[] = {
        &dma_winvalid,
        &dma_winvalid,
        &dma_writel,
};
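
/* Run one pass over all channels, letting any RUNNING channel move
   data in its configured direction.  */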
void etraxfs_dmac_run(void *opaque)
{
        struct fs_dma_ctrl *ctrl = opaque;
        int i;

        for (i = 0; i < ctrl->nr_channels; i++)
        {
                if (ctrl->channels[i].state == RUNNING)
                {
                        if (ctrl->channels[i].input)
                                channel_in_run(ctrl, i);
                        else
                                channel_out_run(ctrl, i);
                }
        }
}
int etraxfs_dmac_input(struct etraxfs_dma_client *client,
                       void *buf, int len, int eop)
{
        return channel_in_process(client->ctrl, client->channel,
                                  buf, len, eop);
}
/* Connect an IRQ line with a channel.  */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
        struct fs_dma_ctrl *ctrl = opaque;
        ctrl->channels[c].irq = line;
        ctrl->channels[c].input = input;
}
void etraxfs_dmac_connect_client(void *opaque, int c,
                                 struct etraxfs_dma_client *cl)
{
        struct fs_dma_ctrl *ctrl = opaque;
        cl->ctrl = ctrl;
        cl->channel = c;
        ctrl->channels[c].client = cl;
}
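
/* Idle bottom-half: polls the controller while the VM is running.
   qemu_bh_schedule_idle() reschedules it at a low rate rather than on
   every main-loop iteration.  */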
static void DMA_run(void *opaque)
{
        struct fs_dma_ctrl *etraxfs_dmac = opaque;
        if (vm_running)
                etraxfs_dmac_run(etraxfs_dmac);
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
}
void *etraxfs_dmac_init(CPUState *env,
                        target_phys_addr_t base, int nr_channels)
{
        struct fs_dma_ctrl *ctrl = NULL;
        int i;

        ctrl = qemu_mallocz(sizeof *ctrl);
        if (!ctrl)
                return NULL;

        ctrl->bh = qemu_bh_new(DMA_run, ctrl);
        qemu_bh_schedule_idle(ctrl->bh);

        ctrl->base = base;
        ctrl->env = env;
        ctrl->nr_channels = nr_channels;
        ctrl->channels = qemu_mallocz(sizeof ctrl->channels[0] * nr_channels);
        if (!ctrl->channels)
                goto err;

        for (i = 0; i < nr_channels; i++)
        {
                ctrl->channels[i].regmap = cpu_register_io_memory(0,
                                                                  dma_read,
                                                                  dma_write,
                                                                  ctrl);
                cpu_register_physical_memory (base + i * 0x2000,
                                              sizeof ctrl->channels[i].regs,
                                              ctrl->channels[i].regmap);
        }

        return ctrl;
  err:
        qemu_free(ctrl->channels);
        qemu_free(ctrl);
        return NULL;
}
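
/*
 * Usage sketch (illustrative only -- the base address, channel number and
 * client below are assumptions, not taken from this file; see the board
 * code that instantiates the controller for the real wiring):
 *
 *      struct etraxfs_dma_client my_client = { ... };
 *      void *dmac = etraxfs_dmac_init(env, 0xb0000000, 10);
 *
 *      // Route channel 0's IRQ and mark it as an input (device->memory)
 *      // channel, then attach the device model.  An input client feeds
 *      // data in with etraxfs_dmac_input(); an output client receives
 *      // data through its push() hook.
 *      etraxfs_dmac_connect(dmac, 0, irq_line, 1);
 *      etraxfs_dmac_connect_client(dmac, 0, &my_client);
 */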