/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"

/* ------------------------------------------------------------- */

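/*
 * Tunables: syncwrite forces a flush after every write instead of only
 * around barrier requests, batch_maps maps all grants of a request with
 * a single grant-table call, max_requests bounds the per-device request
 * pool, use_aio selects the asynchronous submission path.
 */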
static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;
static int use_aio      = 1;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

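/*
 * Per-request state: the copy of the ring request, the parsed start
 * offset and iovec, the grant mapping bookkeeping and the aio status.
 * Requests are recycled via the per-device freelist.
 */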
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    LIST_ENTRY(ioreq)   list;
};

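/*
 * Per-device state: xenstore configuration, the mapped shared ring,
 * the request lists (inflight/finished/freelist) and the qemu block
 * driver backing the disk.
 */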
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    LIST_HEAD(inflight_head, ioreq) inflight;
    LIST_HEAD(finished_head, ioreq) finished;
    LIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    int                 index;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

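/*
 * Get a request slot: reuse one from the freelist if possible, otherwise
 * allocate a new one (up to max_requests).  Returns NULL when the limit
 * is hit; blk_handle_requests() then retries via the bottom half.
 */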
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (LIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests)
            goto out;
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    LIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

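/* Move a completed request from the inflight list to the finished list. */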
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    LIST_REMOVE(ioreq, list);
    LIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

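/* Recycle a request: wipe it and put it back on the freelist. */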
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    LIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    LIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!syncwrite)
            ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite)
            ioreq->postsync = 1;
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

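/*
 * Drop the grant mappings of a request, either with one batched munmap
 * or page by page, matching how ioreq_map() set them up.
 */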
static void ioreq_unmap(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return;
    if (batch_maps) {
        if (!ioreq->pages)
            return;
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0)
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i])
                continue;
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0)
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

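/*
 * Map the granted frontend pages and turn the in-page offsets stored in
 * the iovec by ioreq_parse() into real pointers into the mapped pages.
 */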
static int ioreq_map(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return 0;
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++)
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

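/*
 * Synchronous I/O path: map the grants, then issue blocking bdrv_read()
 * or bdrv_write() calls segment by segment, with flushes around barrier
 * requests as flagged by ioreq_parse().
 */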
static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc, len = 0;
    off_t pos;

    if (ioreq_map(ioreq) == -1)
        goto err;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs);

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs);
    ioreq->status = BLKIF_RSP_OKAY;

    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

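/*
 * Completion callback for the aio path; also called once directly by
 * ioreq_runio_qemu_aio() to drop its submission reference.  When the
 * last reference is gone, set the status, unmap the grants and kick
 * the bottom half so the response gets sent.
 */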
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0)
        return;

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

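/*
 * Asynchronous I/O path: map the grants and submit a single vectored
 * aio request.  aio_inflight is bumped once for the submission phase
 * and once per aio request; the qemu_aio_complete(ioreq, 0) call below
 * drops the submission reference again.
 */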
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq_map(ioreq) == -1)
        goto err;

    ioreq->aio_inflight++;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

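/*
 * Put the response for one request on the ring.  Returns whether the
 * frontend needs to be notified; also flags more_work if further
 * requests are already pending.
 */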
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        /* unreachable: blk_connect() only accepts known protocols */
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests)
        blkdev->more_work++;
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!LIST_EMPTY(&blkdev->finished)) {
        ioreq = LIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify)
        xen_be_send_notify(&blkdev->xendev);
}

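/*
 * Copy one request off the ring, converting from the 32bit or 64bit
 * frontend layout if necessary.
 */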
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

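/*
 * Ring processing loop: send finished responses, pull new requests off
 * the ring, parse them and run the I/O via the aio or sync path.
 * Reschedules the bottom half if work is left over and request slots
 * are available.
 */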
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio)
        blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
            break;
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq))
                xen_be_send_notify(&blkdev->xendev);
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio)
        blk_send_response_all(blkdev);

    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
        qemu_bh_schedule(blkdev->bh);
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

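/*
 * Called when the backend device is created: init the request lists and
 * the bottom half, and enable batched grant maps unless we are running
 * in emulation mode.
 */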
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    LIST_INIT(&blkdev->inflight);
    LIST_INIT(&blkdev->finished);
    LIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE)
        batch_maps = 1;
}

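/*
 * Read the backend configuration from xenstore, open (or look up) the
 * qemu block driver and publish size, sector size and features back to
 * xenstore.
 */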
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int mode, qflags, have_barriers, info = 0;
    char *h = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL)
            h = strchr(blkdev->params, ':');
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (blkdev->mode == NULL)
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    if (blkdev->type == NULL)
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    if (blkdev->dev == NULL)
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    if (blkdev->devtype == NULL)
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL)
        return -1;

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        mode   = O_RDWR;
        qflags = BDRV_O_RDWR;
    } else {
        mode   = O_RDONLY;
        qflags = BDRV_O_RDONLY;
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom"))
        info  |= VDISK_CDROM;

    /* init qemu block driver */
    blkdev->index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->index = drive_get_index(IF_XEN, 0, blkdev->index);
    if (blkdev->index == -1) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open2(blkdev->bs, blkdev->filename, qflags,
                           bdrv_find_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs)
            return -1;
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = drives_table[blkdev->index].bdrv;
    }
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;
}

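/*
 * Connect to the frontend: read ring-ref and event channel from
 * xenstore, map the shared ring according to the negotiated protocol
 * and bind the event channel.
 */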
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1)
        return -1;

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring)
        return -1;
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

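/*
 * Tear down the connection: close the image if we created it ourselves,
 * unbind the event channel and unmap the shared ring.
 */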
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (blkdev->index == -1) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

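/*
 * Final cleanup: free the request pool, the xenstore-derived strings
 * and the bottom half.
 */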
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!LIST_EMPTY(&blkdev->freelist)) {
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

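/* Event channel notification from the frontend: defer to the bottom half. */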
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

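/*
 * Hooks for the generic Xen backend code.  DEVOPS_FLAG_NEED_GNTDEV
 * requests a grant table device handle (xendev.gnttabdev), which the
 * grant mapping code above relies on.
 */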
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};