aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Virtio Block Device |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2007 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Anthony Liguori <aliguori@us.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
Stefan Weil | 5a61cb6 | 2011-09-08 17:55:32 +0200 | [diff] [blame] | 14 | #include "qemu-common.h" |
Markus Armbruster | d75d25e | 2010-07-06 14:37:43 +0200 | [diff] [blame] | 15 | #include "qemu-error.h" |
Stefan Hajnoczi | 6d519a5 | 2010-05-22 18:15:08 +0100 | [diff] [blame] | 16 | #include "trace.h" |
Markus Armbruster | 9db1c0f | 2012-07-10 11:12:31 +0200 | [diff] [blame] | 17 | #include "hw/block-common.h" |
Blue Swirl | 2446333 | 2010-08-24 15:22:24 +0000 | [diff] [blame] | 18 | #include "blockdev.h" |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 19 | #include "virtio-blk.h" |
Paolo Bonzini | 5bb2392 | 2011-11-02 13:19:40 +0100 | [diff] [blame] | 20 | #include "scsi-defs.h" |
Christoph Hellwig | 1063b8b | 2009-04-27 10:29:14 +0200 | [diff] [blame] | 21 | #ifdef __linux__ |
| 22 | # include <scsi/sg.h> |
| 23 | #endif |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 24 | |
/*
 * Per-device state of a virtio block device.
 *
 * vdev must remain the first member: to_virtio_blk() downcasts a
 * VirtIODevice pointer with a plain cast.
 */
typedef struct VirtIOBlock
{
    VirtIODevice vdev;          /* parent virtio device (must stay first) */
    BlockDriverState *bs;       /* backing block device */
    VirtQueue *vq;              /* single request virtqueue */
    void *rq;                   /* singly linked list (VirtIOBlockReq.next) of
                                 * requests parked after an I/O error with the
                                 * "stop" action; re-issued on VM restart */
    QEMUBH *bh;                 /* bottom half that restarts parked requests */
    BlockConf *conf;            /* generic block config (geometry, block size) */
    VirtIOBlkConf *blk;         /* virtio-blk specific config (serial, scsi) */
    unsigned short sector_mask; /* used to check 512-byte-sector alignment of
                                 * requests; presumably
                                 * (logical_block_size / 512) - 1 — set outside
                                 * this chunk, TODO confirm */
    DeviceState *qdev;          /* qdev handle; usage not visible in this chunk */
} VirtIOBlock;
| 37 | |
| 38 | static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev) |
| 39 | { |
| 40 | return (VirtIOBlock *)vdev; |
| 41 | } |
| 42 | |
/*
 * One in-flight guest request.  The guest-supplied descriptors live in
 * "elem"; the in/out/scsi header pointers point directly into the guest
 * buffers obtained from virtqueue_pop().
 */
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;               /* owning device */
    VirtQueueElement elem;          /* popped virtqueue element */
    struct virtio_blk_inhdr *in;    /* status byte; last input segment */
    struct virtio_blk_outhdr *out;  /* request header; first output segment */
    struct virtio_scsi_inhdr *scsi; /* SCSI response header (SG_IO requests) */
    QEMUIOVector qiov;              /* payload iovec, external over elem's sg */
    struct VirtIOBlockReq *next;    /* link in the VirtIOBlock.rq error list */
    BlockAcctCookie acct;           /* block-accounting cookie */
} VirtIOBlockReq;
| 54 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 55 | static void virtio_blk_req_complete(VirtIOBlockReq *req, int status) |
| 56 | { |
| 57 | VirtIOBlock *s = req->dev; |
| 58 | |
Stefan Hajnoczi | 6d519a5 | 2010-05-22 18:15:08 +0100 | [diff] [blame] | 59 | trace_virtio_blk_req_complete(req, status); |
| 60 | |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 61 | stb_p(&req->in->status, status); |
aliguori | d28a1b6 | 2009-03-28 17:46:14 +0000 | [diff] [blame] | 62 | virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in)); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 63 | virtio_notify(&s->vdev, s->vq); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 64 | } |
| 65 | |
Kevin Wolf | f35d68f | 2009-11-27 13:25:39 +0100 | [diff] [blame] | 66 | static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, |
Paolo Bonzini | 1ceee0d | 2012-09-28 17:22:56 +0200 | [diff] [blame] | 67 | bool is_read) |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 68 | { |
Paolo Bonzini | 3e1caa5 | 2012-09-28 17:22:57 +0200 | [diff] [blame] | 69 | BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read, error); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 70 | VirtIOBlock *s = req->dev; |
| 71 | |
Paolo Bonzini | 3e1caa5 | 2012-09-28 17:22:57 +0200 | [diff] [blame] | 72 | if (action == BDRV_ACTION_STOP) { |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 73 | req->next = s->rq; |
| 74 | s->rq = req; |
Paolo Bonzini | 3e1caa5 | 2012-09-28 17:22:57 +0200 | [diff] [blame] | 75 | } else if (action == BDRV_ACTION_REPORT) { |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 76 | virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); |
Christoph Hellwig | a597e79 | 2011-08-25 08:26:01 +0200 | [diff] [blame] | 77 | bdrv_acct_done(s->bs, &req->acct); |
| 78 | g_free(req); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 79 | } |
| 80 | |
Paolo Bonzini | 3e1caa5 | 2012-09-28 17:22:57 +0200 | [diff] [blame] | 81 | bdrv_error_action(s->bs, action, is_read, error); |
| 82 | return action != BDRV_ACTION_IGNORE; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 83 | } |
| 84 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 85 | static void virtio_blk_rw_complete(void *opaque, int ret) |
| 86 | { |
| 87 | VirtIOBlockReq *req = opaque; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 88 | |
Stefan Hajnoczi | 6d519a5 | 2010-05-22 18:15:08 +0100 | [diff] [blame] | 89 | trace_virtio_blk_rw_complete(req, ret); |
| 90 | |
Kevin Wolf | f35d68f | 2009-11-27 13:25:39 +0100 | [diff] [blame] | 91 | if (ret) { |
Paolo Bonzini | 1ceee0d | 2012-09-28 17:22:56 +0200 | [diff] [blame] | 92 | bool is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT); |
Kevin Wolf | f35d68f | 2009-11-27 13:25:39 +0100 | [diff] [blame] | 93 | if (virtio_blk_handle_rw_error(req, -ret, is_read)) |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 94 | return; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 95 | } |
| 96 | |
Kevin Wolf | f35d68f | 2009-11-27 13:25:39 +0100 | [diff] [blame] | 97 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
Christoph Hellwig | a597e79 | 2011-08-25 08:26:01 +0200 | [diff] [blame] | 98 | bdrv_acct_done(req->dev->bs, &req->acct); |
| 99 | g_free(req); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 100 | } |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 101 | |
Christoph Hellwig | aa659be | 2009-09-04 19:02:23 +0200 | [diff] [blame] | 102 | static void virtio_blk_flush_complete(void *opaque, int ret) |
| 103 | { |
| 104 | VirtIOBlockReq *req = opaque; |
| 105 | |
Kevin Wolf | 8c269b5 | 2010-10-20 13:17:30 +0200 | [diff] [blame] | 106 | if (ret) { |
| 107 | if (virtio_blk_handle_rw_error(req, -ret, 0)) { |
| 108 | return; |
| 109 | } |
| 110 | } |
| 111 | |
| 112 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
Christoph Hellwig | a597e79 | 2011-08-25 08:26:01 +0200 | [diff] [blame] | 113 | bdrv_acct_done(req->dev->bs, &req->acct); |
| 114 | g_free(req); |
Christoph Hellwig | aa659be | 2009-09-04 19:02:23 +0200 | [diff] [blame] | 115 | } |
| 116 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 117 | static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s) |
| 118 | { |
Anthony Liguori | 7267c09 | 2011-08-20 22:09:37 -0500 | [diff] [blame] | 119 | VirtIOBlockReq *req = g_malloc(sizeof(*req)); |
aliguori | 487414f | 2009-02-05 22:06:05 +0000 | [diff] [blame] | 120 | req->dev = s; |
Stefan Hajnoczi | de6c804 | 2010-05-14 22:52:30 +0100 | [diff] [blame] | 121 | req->qiov.size = 0; |
| 122 | req->next = NULL; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 123 | return req; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 124 | } |
| 125 | |
| 126 | static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s) |
| 127 | { |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 128 | VirtIOBlockReq *req = virtio_blk_alloc_request(s); |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 129 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 130 | if (req != NULL) { |
| 131 | if (!virtqueue_pop(s->vq, &req->elem)) { |
Anthony Liguori | 7267c09 | 2011-08-20 22:09:37 -0500 | [diff] [blame] | 132 | g_free(req); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 133 | return NULL; |
| 134 | } |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 135 | } |
| 136 | |
| 137 | return req; |
| 138 | } |
| 139 | |
/*
 * Handle a VIRTIO_BLK_T_SCSI_CMD request by forwarding it to the host
 * device via the Linux SG_IO ioctl.
 *
 * Expected descriptor layout:
 *   out_sg[0] = virtio_blk_outhdr, out_sg[1] = SCSI CDB,
 *   out_sg[2..] = optional write payload;
 *   in_sg[..in_num-4] = optional read payload,
 *   in_sg[in_num-3] = sense buffer, in_sg[in_num-2] = virtio_scsi_inhdr,
 *   in_sg[in_num-1] = virtio_blk_inhdr.
 *
 * Completes (and frees) the request in all paths.  On non-Linux hosts
 * the #else branch aborts; presumably it is unreachable because
 * blk->scsi is never set there — TODO confirm against device setup.
 */
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
#ifdef __linux__
    int ret;
    int i;
#endif
    int status = VIRTIO_BLK_S_OK;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        g_free(req);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.  Set before any "goto fail" since the
     * fail path writes req->scsi->errors.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    /* SCSI passthrough must be enabled in the device configuration. */
    if (!req->dev->blk->scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirection commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    struct sg_io_hdr hdr;
    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;

        /* Pass the iovec array itself; sg honours iovec_count. */
        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;

        hdr.dxferp = req->elem.in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    /* Sense buffer is the third-to-last input segment. */
    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred. However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
        hdr.status = CHECK_CONDITION;
    }

    /* Marshal the SG_IO result into the guest-visible virtio_scsi_inhdr. */
    stl_p(&req->scsi->errors,
          hdr.status | (hdr.msg_status << 8) |
          (hdr.host_status << 16) | (hdr.driver_status << 24));
    stl_p(&req->scsi->residual, hdr.resid);
    stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
    stl_p(&req->scsi->data_len, hdr.dxfer_len);

    virtio_blk_req_complete(req, status);
    g_free(req);
    return;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    stl_p(&req->scsi->errors, 255);
    virtio_blk_req_complete(req, status);
    g_free(req);
}
Christoph Hellwig | 1063b8b | 2009-04-27 10:29:14 +0200 | [diff] [blame] | 259 | |
/*
 * Batch of up to 32 pending guest writes, submitted together through a
 * single bdrv_aio_multiwrite() call by virtio_submit_multiwrite().
 * Note: virtio_blk_handle_write() checks fullness against the literal
 * 32, which must match the array length here.
 */
typedef struct MultiReqBuffer {
    BlockRequest blkreq[32];    /* queued write requests */
    unsigned int num_writes;    /* number of valid entries in blkreq[] */
} MultiReqBuffer;
| 264 | |
| 265 | static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb) |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 266 | { |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 267 | int i, ret; |
Christoph Hellwig | 87b245d | 2009-08-13 16:49:56 +0200 | [diff] [blame] | 268 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 269 | if (!mrb->num_writes) { |
| 270 | return; |
| 271 | } |
| 272 | |
| 273 | ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes); |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 274 | if (ret != 0) { |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 275 | for (i = 0; i < mrb->num_writes; i++) { |
| 276 | if (mrb->blkreq[i].error) { |
| 277 | virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO); |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 278 | } |
| 279 | } |
Christoph Hellwig | 87b245d | 2009-08-13 16:49:56 +0200 | [diff] [blame] | 280 | } |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 281 | |
| 282 | mrb->num_writes = 0; |
aliguori | d28a1b6 | 2009-03-28 17:46:14 +0000 | [diff] [blame] | 283 | } |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 284 | |
/*
 * Handle a VIRTIO_BLK_T_FLUSH request: drain the pending write batch,
 * then issue an asynchronous flush.  Completion (and freeing of req)
 * happens in virtio_blk_flush_complete().
 */
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}
| 295 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 296 | static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 297 | { |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 298 | BlockRequest *blkreq; |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 299 | uint64_t sector; |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 300 | |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 301 | sector = ldq_p(&req->out->sector); |
Stefan Hajnoczi | 6d519a5 | 2010-05-22 18:15:08 +0100 | [diff] [blame] | 302 | |
Christoph Hellwig | a597e79 | 2011-08-25 08:26:01 +0200 | [diff] [blame] | 303 | bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE); |
| 304 | |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 305 | trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512); |
| 306 | |
| 307 | if (sector & req->dev->sector_mask) { |
Christoph Hellwig | 8cfacf0 | 2010-03-04 14:20:17 +0100 | [diff] [blame] | 308 | virtio_blk_rw_complete(req, -EIO); |
| 309 | return; |
| 310 | } |
Christoph Hellwig | 52c0502 | 2011-04-06 20:28:34 +0200 | [diff] [blame] | 311 | if (req->qiov.size % req->dev->conf->logical_block_size) { |
| 312 | virtio_blk_rw_complete(req, -EIO); |
| 313 | return; |
| 314 | } |
Christoph Hellwig | 8cfacf0 | 2010-03-04 14:20:17 +0100 | [diff] [blame] | 315 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 316 | if (mrb->num_writes == 32) { |
| 317 | virtio_submit_multiwrite(req->dev->bs, mrb); |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 318 | } |
| 319 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 320 | blkreq = &mrb->blkreq[mrb->num_writes]; |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 321 | blkreq->sector = sector; |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 322 | blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE; |
| 323 | blkreq->qiov = &req->qiov; |
| 324 | blkreq->cb = virtio_blk_rw_complete; |
| 325 | blkreq->opaque = req; |
| 326 | blkreq->error = 0; |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 327 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 328 | mrb->num_writes++; |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 329 | } |
| 330 | |
aliguori | d28a1b6 | 2009-03-28 17:46:14 +0000 | [diff] [blame] | 331 | static void virtio_blk_handle_read(VirtIOBlockReq *req) |
| 332 | { |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 333 | uint64_t sector; |
Christoph Hellwig | 87b245d | 2009-08-13 16:49:56 +0200 | [diff] [blame] | 334 | |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 335 | sector = ldq_p(&req->out->sector); |
| 336 | |
Christoph Hellwig | a597e79 | 2011-08-25 08:26:01 +0200 | [diff] [blame] | 337 | bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ); |
| 338 | |
Stefan Hajnoczi | 81b6b9f | 2011-12-22 13:17:02 +0000 | [diff] [blame] | 339 | trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512); |
| 340 | |
Aurelien Jarno | 92e3c2a | 2011-01-25 11:55:14 +0100 | [diff] [blame] | 341 | if (sector & req->dev->sector_mask) { |
Christoph Hellwig | 8cfacf0 | 2010-03-04 14:20:17 +0100 | [diff] [blame] | 342 | virtio_blk_rw_complete(req, -EIO); |
| 343 | return; |
| 344 | } |
Christoph Hellwig | 52c0502 | 2011-04-06 20:28:34 +0200 | [diff] [blame] | 345 | if (req->qiov.size % req->dev->conf->logical_block_size) { |
| 346 | virtio_blk_rw_complete(req, -EIO); |
| 347 | return; |
| 348 | } |
Paolo Bonzini | ad54ae8 | 2011-11-30 09:12:30 +0100 | [diff] [blame] | 349 | bdrv_aio_readv(req->dev->bs, sector, &req->qiov, |
| 350 | req->qiov.size / BDRV_SECTOR_SIZE, |
| 351 | virtio_blk_rw_complete, req); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 352 | } |
| 353 | |
/*
 * Decode one popped request and dispatch it to the flush / SCSI /
 * get-id / write / read handlers.  Writes are only queued into *mrb;
 * the caller must submit the batch afterwards.
 *
 * Malformed descriptor chains terminate the process: the guest driver
 * is considered fatally broken.
 */
static void virtio_blk_handle_request(VirtIOBlockReq *req,
    MultiReqBuffer *mrb)
{
    uint32_t type;

    /* Every request needs at least the outhdr and the inhdr. */
    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        error_report("virtio-blk missing headers");
        exit(1);
    }

    /* Outhdr must be first output segment, inhdr last input segment. */
    if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
        req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
        error_report("virtio-blk header not in correct element");
        exit(1);
    }

    req->out = (void *)req->elem.out_sg[0].iov_base;
    req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

    type = ldl_p(&req->out->type);

    /*
     * "type" is a bit field, so the branches test with '&' and their
     * order matters: FLUSH and SCSI_CMD win over an OUT bit that may be
     * set in the same value; a bare OUT bit means plain write, anything
     * else falls through to read.
     */
    if (type & VIRTIO_BLK_T_FLUSH) {
        virtio_blk_handle_flush(req, mrb);
    } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
        virtio_blk_handle_scsi(req);
    } else if (type & VIRTIO_BLK_T_GET_ID) {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        strncpy(req->elem.in_sg[0].iov_base,
                s->blk->serial ? s->blk->serial : "",
                MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        g_free(req);
    } else if (type & VIRTIO_BLK_T_OUT) {
        /* Write payload: every output segment after the outhdr. */
        qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                 req->elem.out_num - 1);
        virtio_blk_handle_write(req, mrb);
    } else {
        /* Read payload: every input segment before the trailing inhdr. */
        qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                 req->elem.in_num - 1);
        virtio_blk_handle_read(req);
    }
}
| 401 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 402 | static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) |
| 403 | { |
| 404 | VirtIOBlock *s = to_virtio_blk(vdev); |
| 405 | VirtIOBlockReq *req; |
Kevin Wolf | bc6694d | 2010-01-27 13:12:34 +0100 | [diff] [blame] | 406 | MultiReqBuffer mrb = { |
| 407 | .num_writes = 0, |
Kevin Wolf | bc6694d | 2010-01-27 13:12:34 +0100 | [diff] [blame] | 408 | }; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 409 | |
| 410 | while ((req = virtio_blk_get_request(s))) { |
Kevin Wolf | bc6694d | 2010-01-27 13:12:34 +0100 | [diff] [blame] | 411 | virtio_blk_handle_request(req, &mrb); |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 412 | } |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 413 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 414 | virtio_submit_multiwrite(s->bs, &mrb); |
Kevin Wolf | 91553dc | 2009-09-09 17:53:38 +0200 | [diff] [blame] | 415 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 416 | /* |
| 417 | * FIXME: Want to check for completions before returning to guest mode, |
| 418 | * so cached reads and writes are reported as quickly as possible. But |
| 419 | * that should be done in the generic block layer. |
| 420 | */ |
| 421 | } |
| 422 | |
Markus Armbruster | 213189a | 2009-07-28 14:33:41 -0400 | [diff] [blame] | 423 | static void virtio_blk_dma_restart_bh(void *opaque) |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 424 | { |
| 425 | VirtIOBlock *s = opaque; |
| 426 | VirtIOBlockReq *req = s->rq; |
Kevin Wolf | f1b5286 | 2010-01-27 13:12:35 +0100 | [diff] [blame] | 427 | MultiReqBuffer mrb = { |
| 428 | .num_writes = 0, |
Kevin Wolf | f1b5286 | 2010-01-27 13:12:35 +0100 | [diff] [blame] | 429 | }; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 430 | |
Markus Armbruster | 213189a | 2009-07-28 14:33:41 -0400 | [diff] [blame] | 431 | qemu_bh_delete(s->bh); |
| 432 | s->bh = NULL; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 433 | |
| 434 | s->rq = NULL; |
| 435 | |
| 436 | while (req) { |
Kevin Wolf | f1b5286 | 2010-01-27 13:12:35 +0100 | [diff] [blame] | 437 | virtio_blk_handle_request(req, &mrb); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 438 | req = req->next; |
| 439 | } |
Kevin Wolf | f1b5286 | 2010-01-27 13:12:35 +0100 | [diff] [blame] | 440 | |
Christoph Hellwig | c20fd87 | 2010-06-08 18:26:07 +0200 | [diff] [blame] | 441 | virtio_submit_multiwrite(s->bs, &mrb); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 442 | } |
| 443 | |
Luiz Capitulino | 1dfb4dd | 2011-07-29 14:26:33 -0300 | [diff] [blame] | 444 | static void virtio_blk_dma_restart_cb(void *opaque, int running, |
| 445 | RunState state) |
Markus Armbruster | 213189a | 2009-07-28 14:33:41 -0400 | [diff] [blame] | 446 | { |
| 447 | VirtIOBlock *s = opaque; |
| 448 | |
| 449 | if (!running) |
| 450 | return; |
| 451 | |
| 452 | if (!s->bh) { |
| 453 | s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s); |
| 454 | qemu_bh_schedule(s->bh); |
| 455 | } |
| 456 | } |
| 457 | |
/* VirtIODevice reset callback. */
static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.  bdrv_drain_all() presumably waits for
     * in-flight requests of *every* device, not just this one — confirm
     * against the block layer.
     */
    bdrv_drain_all();
}
| 466 | |
/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;   /* presumably in 512-byte sectors, per
                          * bdrv_get_geometry() — TODO confirm */
    int blk_size = s->conf->logical_block_size;

    bdrv_get_geometry(s->bs, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_raw(&blkcfg.capacity, capacity);
    /* NOTE(review): 128 - 2 looks like a 128-entry queue minus the two
     * header descriptors — confirm against the virtqueue setup. */
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, s->conf->cyls);
    stl_raw(&blkcfg.blk_size, blk_size);
    /* min/opt I/O sizes are advertised in units of logical blocks. */
    stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
    stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
    blkcfg.heads = s->conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, lets use
     * sector_mask to adopt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we dont touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
        blkcfg.sectors = s->conf->secs & ~s->sector_mask;
    } else {
        blkcfg.sectors = s->conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
    blkcfg.alignment_offset = 0;
    /* Reflect the current writeback-cache state in the wce bit. */
    blkcfg.wce = bdrv_enable_write_cache(s->bs);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
| 507 | |
Paolo Bonzini | 13e3dce | 2012-08-09 16:07:19 +0200 | [diff] [blame] | 508 | static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) |
| 509 | { |
| 510 | VirtIOBlock *s = to_virtio_blk(vdev); |
| 511 | struct virtio_blk_config blkcfg; |
| 512 | |
| 513 | memcpy(&blkcfg, config, sizeof(blkcfg)); |
| 514 | bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0); |
| 515 | } |
| 516 | |
Michael S. Tsirkin | 8172539 | 2010-01-10 13:52:53 +0200 | [diff] [blame] | 517 | static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features) |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 518 | { |
john cooper | bf01129 | 2009-06-22 14:26:51 -0400 | [diff] [blame] | 519 | VirtIOBlock *s = to_virtio_blk(vdev); |
Christoph Hellwig | 1063b8b | 2009-04-27 10:29:14 +0200 | [diff] [blame] | 520 | |
| 521 | features |= (1 << VIRTIO_BLK_F_SEG_MAX); |
| 522 | features |= (1 << VIRTIO_BLK_F_GEOMETRY); |
Christoph Hellwig | 9752c37 | 2010-02-10 23:37:25 +0100 | [diff] [blame] | 523 | features |= (1 << VIRTIO_BLK_F_TOPOLOGY); |
Christoph Hellwig | 8cfacf0 | 2010-03-04 14:20:17 +0100 | [diff] [blame] | 524 | features |= (1 << VIRTIO_BLK_F_BLK_SIZE); |
Paolo Bonzini | a6c5c84 | 2012-05-16 12:54:06 +0200 | [diff] [blame] | 525 | features |= (1 << VIRTIO_BLK_F_SCSI); |
Christoph Hellwig | aa659be | 2009-09-04 19:02:23 +0200 | [diff] [blame] | 526 | |
| 527 | if (bdrv_enable_write_cache(s->bs)) |
Paolo Bonzini | 13e3dce | 2012-08-09 16:07:19 +0200 | [diff] [blame] | 528 | features |= (1 << VIRTIO_BLK_F_WCE); |
| 529 | |
Naphtali Sprei | c79662f | 2009-10-29 11:42:11 +0200 | [diff] [blame] | 530 | if (bdrv_is_read_only(s->bs)) |
| 531 | features |= 1 << VIRTIO_BLK_F_RO; |
Christoph Hellwig | 1063b8b | 2009-04-27 10:29:14 +0200 | [diff] [blame] | 532 | |
| 533 | return features; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 534 | } |
| 535 | |
Paolo Bonzini | 9315cbf | 2012-08-09 16:07:20 +0200 | [diff] [blame] | 536 | static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) |
| 537 | { |
| 538 | VirtIOBlock *s = to_virtio_blk(vdev); |
| 539 | uint32_t features; |
| 540 | |
| 541 | if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { |
| 542 | return; |
| 543 | } |
| 544 | |
| 545 | features = vdev->guest_features; |
| 546 | bdrv_set_enable_write_cache(s->bs, !!(features & (1 << VIRTIO_BLK_F_WCE))); |
| 547 | } |
| 548 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 549 | static void virtio_blk_save(QEMUFile *f, void *opaque) |
| 550 | { |
| 551 | VirtIOBlock *s = opaque; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 552 | VirtIOBlockReq *req = s->rq; |
| 553 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 554 | virtio_save(&s->vdev, f); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 555 | |
| 556 | while (req) { |
| 557 | qemu_put_sbyte(f, 1); |
| 558 | qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem)); |
| 559 | req = req->next; |
| 560 | } |
| 561 | qemu_put_sbyte(f, 0); |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 562 | } |
| 563 | |
| 564 | static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id) |
| 565 | { |
| 566 | VirtIOBlock *s = opaque; |
Orit Wassermann | 2a633c4 | 2012-05-16 12:21:35 +0200 | [diff] [blame] | 567 | int ret; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 568 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 569 | if (version_id != 2) |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 570 | return -EINVAL; |
| 571 | |
Orit Wassermann | 2a633c4 | 2012-05-16 12:21:35 +0200 | [diff] [blame] | 572 | ret = virtio_load(&s->vdev, f); |
| 573 | if (ret) { |
| 574 | return ret; |
| 575 | } |
| 576 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 577 | while (qemu_get_sbyte(f)) { |
| 578 | VirtIOBlockReq *req = virtio_blk_alloc_request(s); |
| 579 | qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem)); |
| 580 | req->next = s->rq; |
Yoshiaki Tamura | 20a81e4 | 2010-06-21 17:50:01 +0900 | [diff] [blame] | 581 | s->rq = req; |
Kevin Wolf | b6a4805 | 2010-08-03 16:57:02 +0200 | [diff] [blame] | 582 | |
| 583 | virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr, |
| 584 | req->elem.in_num, 1); |
| 585 | virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr, |
| 586 | req->elem.out_num, 0); |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 587 | } |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 588 | |
| 589 | return 0; |
| 590 | } |
| 591 | |
Markus Armbruster | 145feb1 | 2011-08-03 15:07:42 +0200 | [diff] [blame] | 592 | static void virtio_blk_resize(void *opaque) |
Christoph Hellwig | e5051fc | 2011-01-24 13:32:51 +0100 | [diff] [blame] | 593 | { |
| 594 | VirtIOBlock *s = opaque; |
| 595 | |
Markus Armbruster | 145feb1 | 2011-08-03 15:07:42 +0200 | [diff] [blame] | 596 | virtio_notify_config(&s->vdev); |
Christoph Hellwig | e5051fc | 2011-01-24 13:32:51 +0100 | [diff] [blame] | 597 | } |
| 598 | |
Markus Armbruster | 0e49de5 | 2011-08-03 15:07:41 +0200 | [diff] [blame] | 599 | static const BlockDevOps virtio_block_ops = { |
Markus Armbruster | 145feb1 | 2011-08-03 15:07:42 +0200 | [diff] [blame] | 600 | .resize_cb = virtio_blk_resize, |
Markus Armbruster | 0e49de5 | 2011-08-03 15:07:41 +0200 | [diff] [blame] | 601 | }; |
| 602 | |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 603 | VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk) |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 604 | { |
| 605 | VirtIOBlock *s; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 606 | static int virtio_blk_id; |
| 607 | |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 608 | if (!blk->conf.bs) { |
Markus Armbruster | 6a84cb1 | 2011-12-21 11:37:57 +0100 | [diff] [blame] | 609 | error_report("drive property not set"); |
Markus Armbruster | d75d25e | 2010-07-06 14:37:43 +0200 | [diff] [blame] | 610 | return NULL; |
| 611 | } |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 612 | if (!bdrv_is_inserted(blk->conf.bs)) { |
Markus Armbruster | 98f28ad | 2010-07-06 14:37:44 +0200 | [diff] [blame] | 613 | error_report("Device needs media, but drive is empty"); |
| 614 | return NULL; |
| 615 | } |
Markus Armbruster | d75d25e | 2010-07-06 14:37:43 +0200 | [diff] [blame] | 616 | |
Markus Armbruster | 911525d | 2012-07-11 15:08:37 +0200 | [diff] [blame] | 617 | blkconf_serial(&blk->conf, &blk->serial); |
Markus Armbruster | b7eb0c9 | 2012-07-11 15:08:39 +0200 | [diff] [blame] | 618 | if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) { |
| 619 | return NULL; |
| 620 | } |
Markus Armbruster | a8686a9 | 2011-06-20 11:35:18 +0200 | [diff] [blame] | 621 | |
Paul Brook | 53c25ce | 2009-05-18 14:51:59 +0100 | [diff] [blame] | 622 | s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK, |
hch@lst.de | 37d5ddd | 2010-02-10 23:36:49 +0100 | [diff] [blame] | 623 | sizeof(struct virtio_blk_config), |
Paul Brook | 53c25ce | 2009-05-18 14:51:59 +0100 | [diff] [blame] | 624 | sizeof(VirtIOBlock)); |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 625 | |
| 626 | s->vdev.get_config = virtio_blk_update_config; |
Paolo Bonzini | 13e3dce | 2012-08-09 16:07:19 +0200 | [diff] [blame] | 627 | s->vdev.set_config = virtio_blk_set_config; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 628 | s->vdev.get_features = virtio_blk_get_features; |
Paolo Bonzini | 9315cbf | 2012-08-09 16:07:20 +0200 | [diff] [blame] | 629 | s->vdev.set_status = virtio_blk_set_status; |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 630 | s->vdev.reset = virtio_blk_reset; |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 631 | s->bs = blk->conf.bs; |
| 632 | s->conf = &blk->conf; |
| 633 | s->blk = blk; |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 634 | s->rq = NULL; |
Jes Sorensen | 1573a35 | 2010-05-27 16:20:33 +0200 | [diff] [blame] | 635 | s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1; |
Markus Armbruster | e63e7fd | 2012-07-10 11:12:43 +0200 | [diff] [blame] | 636 | |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 637 | s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); |
| 638 | |
aliguori | 869a5c6 | 2009-01-22 19:52:25 +0000 | [diff] [blame] | 639 | qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); |
Alex Williamson | 9d0d313 | 2010-07-20 11:14:22 -0600 | [diff] [blame] | 640 | s->qdev = dev; |
Alex Williamson | 0be71e3 | 2010-06-25 11:09:07 -0600 | [diff] [blame] | 641 | register_savevm(dev, "virtio-blk", virtio_blk_id++, 2, |
aliguori | 6e02c38 | 2008-12-04 19:52:44 +0000 | [diff] [blame] | 642 | virtio_blk_save, virtio_blk_load, s); |
Markus Armbruster | 0e49de5 | 2011-08-03 15:07:41 +0200 | [diff] [blame] | 643 | bdrv_set_dev_ops(s->bs, &virtio_block_ops, s); |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 644 | bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size); |
Paul Brook | 07e3af9 | 2009-05-14 22:35:08 +0100 | [diff] [blame] | 645 | |
Luiz Capitulino | af239a6 | 2011-09-26 17:43:51 -0300 | [diff] [blame] | 646 | bdrv_iostatus_enable(s->bs); |
Paolo Bonzini | 12c5674 | 2012-05-16 12:54:05 +0200 | [diff] [blame] | 647 | add_boot_device_path(s->conf->bootindex, dev, "/disk@0,0"); |
Gleb Natapov | 1ca4d09 | 2010-12-08 13:35:05 +0200 | [diff] [blame] | 648 | |
Paul Brook | 53c25ce | 2009-05-18 14:51:59 +0100 | [diff] [blame] | 649 | return &s->vdev; |
Paul Brook | 07e3af9 | 2009-05-14 22:35:08 +0100 | [diff] [blame] | 650 | } |
Alex Williamson | 9d0d313 | 2010-07-20 11:14:22 -0600 | [diff] [blame] | 651 | |
| 652 | void virtio_blk_exit(VirtIODevice *vdev) |
| 653 | { |
| 654 | VirtIOBlock *s = to_virtio_blk(vdev); |
| 655 | unregister_savevm(s->qdev, "virtio-blk", s); |
Paolo Bonzini | 0e47931 | 2012-05-16 12:54:04 +0200 | [diff] [blame] | 656 | blockdev_mark_auto_del(s->bs); |
Amit Shah | d92551f | 2011-07-27 14:00:30 +0530 | [diff] [blame] | 657 | virtio_cleanup(vdev); |
Alex Williamson | 9d0d313 | 2010-07-20 11:14:22 -0600 | [diff] [blame] | 658 | } |