2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "scsi-disk.h"
28 /* FIXME: Only needed for MAX_DISKS, which is probably wrong. */
35 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
36 * also produced as NCR89C100. See
37 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
43 #define DPRINTF(fmt, args...) \
44 do { printf("ESP: " fmt , ##args); } while (0)
46 #define DPRINTF(fmt, args...)
51 #define ESP_SIZE (ESP_REGS * 4)
53 /* The HBA is ID 7, so for simplicity limit to 7 devices. */
54 #define ESP_MAX_DEVS 7
56 typedef struct ESPState ESPState;
60 BlockDriverState **bd;
61 uint8_t rregs[ESP_REGS];
62 uint8_t wregs[ESP_REGS];
64 uint32_t ti_rptr, ti_wptr;
65 uint8_t ti_buf[TI_BUFSZ];
68 SCSIDevice *scsi_dev[MAX_DISKS];
69 SCSIDevice *current_dev;
70 uint8_t cmdbuf[TI_BUFSZ];
74 /* The amount of data left in the current DMA transfer. */
76 /* The size of the current DMA transfer. Zero if no transfer is in
89 #define ESP_WBUSID 0x4
93 #define ESP_WSYNTP 0x6
94 #define ESP_RFLAGS 0x7
100 #define ESP_WTEST 0xa
111 #define CMD_FLUSH 0x01
112 #define CMD_RESET 0x02
113 #define CMD_BUSRESET 0x03
115 #define CMD_ICCS 0x11
116 #define CMD_MSGACC 0x12
117 #define CMD_SATN 0x1a
118 #define CMD_SELATN 0x42
119 #define CMD_SELATNS 0x43
120 #define CMD_ENSEL 0x44
128 #define STAT_PIO_MASK 0x06
138 #define INTR_RST 0x80
143 #define CFG1_RESREPT 0x40
145 #define CFG2_MASK 0x15
147 #define TCHI_FAS100A 0x4
/* Fetch a SCSI command descriptor block into buf for the target selected
 * via the bus-ID register.  The byte count comes from the 16-bit transfer
 * counter (TCLO | TCMID<<8); the bytes come either from DMA
 * (espdma_memory_read) or from the PIO FIFO buffer ti_buf.
 * Selecting a nonexistent target reports Disconnect status and raises the
 * interrupt.  Returns the number of command bytes gathered — callers store
 * it into s->cmdlen (see handle_satn_stop).
 * NOTE(review): parts of this function are elided in this view. */
149 static int get_cmd(ESPState *s, uint8_t *buf)
154     dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
/* Target ID is the low 3 bits of the bus-ID write register. */
155     target = s->wregs[ESP_WBUSID] & 7;
156     DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
158         espdma_memory_read(s->dma_opaque, buf, dmalen);
161         memcpy(&buf[1], s->ti_buf, dmalen);
169     if (s->current_dev) {
170         /* Started a new command before the old one finished.  Cancel it. */
171         scsi_cancel_io(s->current_dev, 0);
/* No such device attached: report Disconnect and interrupt the guest. */
175     if (target >= MAX_DISKS || !s->scsi_dev[target]) {
177         s->rregs[ESP_RSTAT] = STAT_IN;
178         s->rregs[ESP_RINTR] = INTR_DC;
179         s->rregs[ESP_RSEQ] = SEQ_0;
180         qemu_irq_raise(s->irq);
183     s->current_dev = s->scsi_dev[target];
/* Submit the buffered CDB (buf[1..]) to the current SCSI device and set up
 * the following data phase.  scsi_send_command returns the expected data
 * length (sign apparently encodes direction — see the DI/DO branches);
 * it is cached in ti_size.  Finishes by signalling bus-service/function-
 * complete and raising the interrupt.
 * NOTE(review): parts of this function are elided in this view. */
187 static void do_cmd(ESPState *s, uint8_t *buf)
192     DPRINTF("do_cmd: busid 0x%x\n", buf[0]);
194     datalen = scsi_send_command(s->current_dev, 0, &buf[1], lun);
195     s->ti_size = datalen;
197         s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC;
/* Device-to-host transfer: Data In phase, start the read. */
201             s->rregs[ESP_RSTAT] |= STAT_DI;
202             scsi_read_data(s->current_dev, 0);
/* Host-to-device transfer: Data Out phase, start the write. */
204             s->rregs[ESP_RSTAT] |= STAT_DO;
205             scsi_write_data(s->current_dev, 0);
208     s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
209     s->rregs[ESP_RSEQ] = SEQ_CD;
210     qemu_irq_raise(s->irq);
/* CMD_SATN handler: fetch the CDB into a local buffer and (in the elided
 * remainder of this function) dispatch it immediately. */
213 static void handle_satn(ESPState *s)
218     len = get_cmd(s, buf);
/* CMD_SELATNS ("Select with ATN and stop") handler: fetch the CDB into
 * s->cmdbuf but do not execute it yet; report Command phase with
 * bus-service/function-complete and interrupt, so the guest can continue
 * with a Transfer Information command (see handle_ti / do_cmd). */
223 static void handle_satn_stop(ESPState *s)
225     s->cmdlen = get_cmd(s, s->cmdbuf);
227         DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
229         s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC | STAT_CD;
230         s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
231         s->rregs[ESP_RSEQ] = SEQ_CD;
232         qemu_irq_raise(s->irq);
/* Deliver the Status + Message-In pair to the guest after a command
 * completes: byte 0 is the saved sense/status, transferred either via DMA
 * (2 bytes) or left in the FIFO with RFLAGS = 2 for PIO readout.
 * NOTE(review): parts of this function are elided in this view. */
236 static void write_response(ESPState *s)
238     DPRINTF("Transfer status (sense=%d)\n", s->sense);
239     s->ti_buf[0] = s->sense;
242         espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
243         s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC | STAT_ST;
244         s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
245         s->rregs[ESP_RSEQ] = SEQ_CD;
/* PIO path: two bytes pending in the FIFO. */
250         s->rregs[ESP_RFLAGS] = 2;
252     qemu_irq_raise(s->irq);
/* Signal completion of a DMA transfer: set transfer-count-zero status,
 * clear the sequence/FIFO-flags and the 16-bit transfer counter, and raise
 * the interrupt. */
255 static void esp_dma_done(ESPState *s)
257     s->rregs[ESP_RSTAT] |= STAT_IN | STAT_TC;
258     s->rregs[ESP_RINTR] = INTR_BS;
259     s->rregs[ESP_RSEQ] = 0;
260     s->rregs[ESP_RFLAGS] = 0;
261     s->rregs[ESP_TCLO] = 0;
262     s->rregs[ESP_TCMID] = 0;
263     qemu_irq_raise(s->irq);
/* Advance the current DMA transfer by one chunk.  Direction is encoded in
 * the sign of ti_size (negative = host-to-device).  While a deferred
 * command is pending (do_cmd path) the DMA bytes are appended to cmdbuf
 * and the command is issued; otherwise data is shuttled between guest
 * memory and the SCSI layer's async buffer, deferring when the buffer is
 * empty and completing via esp_dma_done when appropriate.
 * NOTE(review): parts of this function are elided in this view. */
266 static void esp_do_dma(ESPState *s)
271     to_device = (s->ti_size < 0);
274         DPRINTF("command len %d + %d\n", s->cmdlen, len);
275         espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
279         do_cmd(s, s->cmdbuf);
282     if (s->async_len == 0) {
283         /* Defer until data is available. */
/* Clamp the chunk to what the SCSI layer currently has buffered. */
286     if (len > s->async_len) {
290         espdma_memory_read(s->dma_opaque, s->async_buf, len);
292         espdma_memory_write(s->dma_opaque, s->async_buf, len);
301     if (s->async_len == 0) {
303             // ti_size is negative
304             scsi_write_data(s->current_dev, 0);
306             scsi_read_data(s->current_dev, 0);
307             /* If there is still data to be read from the device then
308                complete the DMA operation immediately.  Otherwise defer
309                until the scsi layer has completed. */
310             if (s->dma_left == 0 && s->ti_size > 0) {
315         /* Partially filled a scsi buffer.  Complete immediately. */
/* Callback from the SCSI layer (registered in esp_scsi_attach).  For
 * SCSI_REASON_DONE it records status, reports STAT_ST and drops the
 * current device; otherwise it refreshes async_buf from scsi_get_buf and
 * continues the transfer, issuing the completion interrupt here when this
 * was the final chunk of a DMA transfer.
 * NOTE(review): parts of this function are elided in this view. */
320 static void esp_command_complete(void *opaque, int reason, uint32_t tag,
323     ESPState *s = (ESPState *)opaque;
325     if (reason == SCSI_REASON_DONE) {
326         DPRINTF("SCSI Command complete\n");
328             DPRINTF("SCSI command completed unexpectedly\n");
333             DPRINTF("Command failed\n");
335         s->rregs[ESP_RSTAT] = STAT_ST;
337         s->current_dev = NULL;
339         DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
341         s->async_buf = scsi_get_buf(s->current_dev, 0);
344         } else if (s->dma_counter != 0 && s->ti_size <= 0) {
345             /* If this was the last part of a DMA transfer then the
346                completion interrupt is deferred to here. */
/* CMD_TI (Transfer Information) handler.  Computes the transfer length as
 * the minimum of the programmed DMA counter and the outstanding ti_size
 * (|ti_size|, since negative means host-to-device; the 32-byte cap on one
 * branch presumably bounds the command-buffer path — confirm against the
 * elided condition).  Starts the DMA, or issues a deferred command when
 * do_cmd is set.
 * NOTE(review): parts of this function are elided in this view. */
352 static void handle_ti(ESPState *s)
354     uint32_t dmalen, minlen;
356     dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
360     s->dma_counter = dmalen;
363         minlen = (dmalen < 32) ? dmalen : 32;
364     else if (s->ti_size < 0)
365         minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
367         minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
368     DPRINTF("Transfer Information len %d\n", minlen);
370         s->dma_left = minlen;
/* New transfer starting: transfer count is no longer zero. */
371         s->rregs[ESP_RSTAT] &= ~STAT_TC;
373     } else if (s->do_cmd) {
374         DPRINTF("command len %d\n", s->cmdlen);
378         do_cmd(s, s->cmdbuf);
/* Reset handler (registered via qemu_register_reset and also used by the
 * chip-reset command): clear both register files, then restore the TCHI
 * identification value. */
383 static void esp_reset(void *opaque)
385     ESPState *s = opaque;
387     memset(s->rregs, 0, ESP_REGS);
388     memset(s->wregs, 0, ESP_REGS);
389     s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
397 static void parent_esp_reset(void *opaque, int irq, int level)
/* MMIO byte read.  Registers are 32-bit aligned, so the register index is
 * (addr & ESP_MASK) >> 2.  Reading the FIFO pops a byte from ti_buf (PIO
 * only — DMA PIO-read is unimplemented and reads as 0); reading the
 * interrupt register clears the latched status/error bits and lowers the
 * IRQ line.
 * NOTE(review): parts of this function are elided in this view. */
403 static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
405     ESPState *s = opaque;
408     saddr = (addr & ESP_MASK) >> 2;
409     DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
412         if (s->ti_size > 0) {
414             if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
416                 fprintf(stderr, "esp: PIO data read not implemented\n");
417                 s->rregs[ESP_FIFO] = 0;
419                 s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
421             qemu_irq_raise(s->irq);
423         if (s->ti_size == 0) {
429         // Clear interrupt/error status bits
430         s->rregs[ESP_RSTAT] &= ~(STAT_IN | STAT_GE | STAT_PE);
431         qemu_irq_lower(s->irq);
436     return s->rregs[saddr];
/* MMIO byte write.  FIFO writes append to cmdbuf (while gathering a
 * command) or ti_buf (PIO); the command register decodes the low bits
 * (CMD_CMD) into the NOP/flush/reset/select/transfer operations, reloading
 * the DMA counter from the write-side TC registers first when DMA is
 * requested.  Writable config registers are mirrored into rregs (CFG2
 * masked to its implemented bits); everything ends by latching val into
 * wregs[saddr].
 * NOTE(review): parts of this function are elided in this view. */
439 static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
441     ESPState *s = opaque;
444     saddr = (addr & ESP_MASK) >> 2;
445     DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr],
450         s->rregs[ESP_RSTAT] &= ~STAT_TC;
/* Gathering a command: FIFO bytes extend the command buffer. */
454             s->cmdbuf[s->cmdlen++] = val & 0xff;
455         } else if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
459             fprintf(stderr, "esp: PIO data write not implemented\n");
462             s->ti_buf[s->ti_wptr++] = val & 0xff;
466         s->rregs[saddr] = val;
469             /* Reload DMA counter. */
470             s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
471             s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
475         switch(val & CMD_CMD) {
477             DPRINTF("NOP (%2.2x)\n", val);
480             DPRINTF("Flush FIFO (%2.2x)\n", val);
482             s->rregs[ESP_RINTR] = INTR_FC;
483             s->rregs[ESP_RSEQ] = 0;
486             DPRINTF("Chip reset (%2.2x)\n", val);
490             DPRINTF("Bus reset (%2.2x)\n", val);
491             s->rregs[ESP_RINTR] = INTR_RST;
/* Only interrupt on bus reset if reset reporting is not disabled. */
492             if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
493                 qemu_irq_raise(s->irq);
500             DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
504             DPRINTF("Message Accepted (%2.2x)\n", val);
506             s->rregs[ESP_RINTR] = INTR_DC;
507             s->rregs[ESP_RSEQ] = 0;
510             DPRINTF("Set ATN (%2.2x)\n", val);
513             DPRINTF("Set ATN (%2.2x)\n", val);
517             DPRINTF("Set ATN & stop (%2.2x)\n", val);
521             DPRINTF("Enable selection (%2.2x)\n", val);
524             DPRINTF("Unhandled ESP command (%2.2x)\n", val);
528     case ESP_WBUSID ... ESP_WSYNO:
531         s->rregs[saddr] = val;
533     case ESP_WCCF ... ESP_WTEST:
/* CFG2 only implements the bits in CFG2_MASK. */
536         s->rregs[saddr] = val & CFG2_MASK;
538     case ESP_CFG3 ... ESP_RES4:
539         s->rregs[saddr] = val;
544     s->wregs[saddr] = val;
547 static CPUReadMemoryFunc *esp_mem_read[3] = {
553 static CPUWriteMemoryFunc *esp_mem_write[3] = {
/* savevm handler: serialize the register files, PIO buffer/pointers and
 * command/DMA bookkeeping.  Field order must match esp_load exactly. */
559 static void esp_save(QEMUFile *f, void *opaque)
561     ESPState *s = opaque;
563     qemu_put_buffer(f, s->rregs, ESP_REGS);
564     qemu_put_buffer(f, s->wregs, ESP_REGS);
565     qemu_put_be32s(f, &s->ti_size);
566     qemu_put_be32s(f, &s->ti_rptr);
567     qemu_put_be32s(f, &s->ti_wptr);
568     qemu_put_buffer(f, s->ti_buf, TI_BUFSZ);
569     qemu_put_be32s(f, &s->sense);
570     qemu_put_be32s(f, &s->dma);
571     qemu_put_buffer(f, s->cmdbuf, TI_BUFSZ);
572     qemu_put_be32s(f, &s->cmdlen);
573     qemu_put_be32s(f, &s->do_cmd);
574     qemu_put_be32s(f, &s->dma_left);
575     // There should be no transfers in progress, so dma_counter is not saved
/* loadvm handler: mirror of esp_save.  Rejects unsupported snapshot
 * versions (the elided check refuses version 2), then restores fields in
 * the same order esp_save wrote them.  Returns 0 on success, -EINVAL on a
 * version mismatch. */
578 static int esp_load(QEMUFile *f, void *opaque, int version_id)
580     ESPState *s = opaque;
583         return -EINVAL; // Cannot emulate 2
585     qemu_get_buffer(f, s->rregs, ESP_REGS);
586     qemu_get_buffer(f, s->wregs, ESP_REGS);
587     qemu_get_be32s(f, &s->ti_size);
588     qemu_get_be32s(f, &s->ti_rptr);
589     qemu_get_be32s(f, &s->ti_wptr);
590     qemu_get_buffer(f, s->ti_buf, TI_BUFSZ);
591     qemu_get_be32s(f, &s->sense);
592     qemu_get_be32s(f, &s->dma);
593     qemu_get_buffer(f, s->cmdbuf, TI_BUFSZ);
594     qemu_get_be32s(f, &s->cmdlen);
595     qemu_get_be32s(f, &s->do_cmd);
596     qemu_get_be32s(f, &s->dma_left);
/* Attach a block device as SCSI target `id`.  A negative/auto id (elided
 * check) picks the first free slot; ids beyond ESP_MAX_DEVS are rejected.
 * An existing device at that id is destroyed and replaced.  The disk is
 * created with esp_command_complete as its completion callback; command
 * queueing is not implemented (tag 0 everywhere).
 * NOTE(review): parts of this function are elided in this view. */
601 void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
603     ESPState *s = (ESPState *)opaque;
606         for (id = 0; id < ESP_MAX_DEVS; id++) {
607             if (s->scsi_dev[id] == NULL)
611     if (id >= ESP_MAX_DEVS) {
612         DPRINTF("Bad Device ID %d\n", id);
615     if (s->scsi_dev[id]) {
616         DPRINTF("Destroying device %d\n", id);
617         scsi_disk_destroy(s->scsi_dev[id]);
619     DPRINTF("Attaching block device %d\n", id);
620     /* Command queueing is not implemented.  */
621     s->scsi_dev[id] = scsi_disk_init(bd, 0, esp_command_complete, s);
/* Create an ESP instance: allocate the state, map the register bank at
 * espaddr, register savevm/reset handlers and hand back an IRQ the parent
 * can pulse to reset the chip.  Returns the opaque ESPState (used later by
 * esp_scsi_attach).
 * NOTE(review): parts of this function are elided in this view, and it may
 * continue past the last visible line. */
624 void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
625                void *dma_opaque, qemu_irq irq, qemu_irq *reset)
630     s = qemu_mallocz(sizeof(ESPState));
636     s->dma_opaque = dma_opaque;
638     esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
639     cpu_register_physical_memory(espaddr, ESP_SIZE, esp_io_memory);
/* espaddr doubles as the savevm instance id so multiple ESPs stay distinct. */
643     register_savevm("esp", espaddr, 3, esp_save, esp_load, s);
644     qemu_register_reset(esp_reset, s);
646     *reset = *qemu_allocate_irqs(parent_esp_reset, s, 1);