blob: 272632f3671988e479d0c3473efbb8c12b1bb075 [file] [log] [blame]
aliguori244ab902009-02-05 21:23:50 +00001/*
2 * DMA helper functions
3 *
4 * Copyright (c) 2009 Red Hat
5 *
6 * This work is licensed under the terms of the GNU General Public License
7 * (GNU GPL), version 2 or later.
8 */
9
Paolo Bonzini9c17d612012-12-17 18:20:04 +010010#include "sysemu/dma.h"
Kevin Wolfc57c4652011-11-24 06:15:28 -050011#include "trace.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010012#include "qemu/range.h"
13#include "qemu/thread.h"
aliguori244ab902009-02-05 21:23:50 +000014
David Gibsone5332e62012-06-27 14:50:43 +100015/* #define DEBUG_IOMMU */
16
Avi Kivityb90600e2012-10-03 16:42:37 +020017static void do_dma_memory_set(AddressSpace *as,
18 dma_addr_t addr, uint8_t c, dma_addr_t len)
David Gibsond86a77f2012-06-27 14:50:38 +100019{
20#define FILLBUF_SIZE 512
21 uint8_t fillbuf[FILLBUF_SIZE];
22 int l;
23
24 memset(fillbuf, c, FILLBUF_SIZE);
25 while (len > 0) {
26 l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
Avi Kivityb90600e2012-10-03 16:42:37 +020027 address_space_rw(as, addr, fillbuf, l, true);
Benjamin Herrenschmidtbc9b78d2012-08-14 17:41:47 +100028 len -= l;
29 addr += l;
David Gibsond86a77f2012-06-27 14:50:38 +100030 }
David Gibsone5332e62012-06-27 14:50:43 +100031}
32
33int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
34{
Benjamin Herrenschmidt7a0bac42012-06-27 14:50:47 +100035 dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);
36
David Gibsone5332e62012-06-27 14:50:43 +100037 if (dma_has_iommu(dma)) {
38 return iommu_dma_memory_set(dma, addr, c, len);
39 }
Avi Kivityb90600e2012-10-03 16:42:37 +020040 do_dma_memory_set(dma->as, addr, c, len);
David Gibsone5332e62012-06-27 14:50:43 +100041
David Gibsond86a77f2012-06-27 14:50:38 +100042 return 0;
43}
44
David Gibsonc65bcef2012-06-27 14:50:40 +100045void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
aliguori244ab902009-02-05 21:23:50 +000046{
Anthony Liguori7267c092011-08-20 22:09:37 -050047 qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
aliguori244ab902009-02-05 21:23:50 +000048 qsg->nsg = 0;
49 qsg->nalloc = alloc_hint;
50 qsg->size = 0;
David Gibsonc65bcef2012-06-27 14:50:40 +100051 qsg->dma = dma;
aliguori244ab902009-02-05 21:23:50 +000052}
53
David Gibsond3231182011-10-31 17:06:46 +110054void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
aliguori244ab902009-02-05 21:23:50 +000055{
56 if (qsg->nsg == qsg->nalloc) {
57 qsg->nalloc = 2 * qsg->nalloc + 1;
Anthony Liguori7267c092011-08-20 22:09:37 -050058 qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
aliguori244ab902009-02-05 21:23:50 +000059 }
60 qsg->sg[qsg->nsg].base = base;
61 qsg->sg[qsg->nsg].len = len;
62 qsg->size += len;
63 ++qsg->nsg;
64}
65
/*
 * Release the entry array and zero the whole list structure so stale
 * pointers/counters cannot be reused after destruction.
 */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
71
/* State for one in-flight scatter/gather DMA block request. */
typedef struct {
    BlockDriverAIOCB common;   /* embedded AIOCB; must be the first field
                                * so container_of() works in the cancel hook */
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;     /* current block-layer sub-request, or NULL */
    QEMUSGList *sg;            /* guest-side scatter/gather description */
    uint64_t sector_num;       /* next sector to transfer (512-byte units) */
    DMADirection dir;
    bool in_cancel;            /* set while dma_aio_cancel() runs, so
                                * dma_complete() defers the release */
    int sg_cur_index;          /* index of the s/g entry being consumed */
    dma_addr_t sg_cur_byte;    /* byte offset within that entry */
    QEMUIOVector iov;          /* host mappings for the current chunk */
    QEMUBH *bh;                /* retry BH armed after a map failure */
    DMAIOFunc *io_func;        /* e.g. bdrv_aio_readv / bdrv_aio_writev */
} DMAAIOCB;
aliguori59a703e2009-02-05 21:23:58 +000086
87static void dma_bdrv_cb(void *opaque, int ret);
88
89static void reschedule_dma(void *opaque)
90{
aliguori37b78422009-03-20 18:26:16 +000091 DMAAIOCB *dbs = (DMAAIOCB *)opaque;
aliguori59a703e2009-02-05 21:23:58 +000092
93 qemu_bh_delete(dbs->bh);
94 dbs->bh = NULL;
Paolo Bonzinic3adb5b2011-09-16 16:40:02 +020095 dma_bdrv_cb(dbs, 0);
aliguori59a703e2009-02-05 21:23:58 +000096}
97
98static void continue_after_map_failure(void *opaque)
99{
aliguori37b78422009-03-20 18:26:16 +0000100 DMAAIOCB *dbs = (DMAAIOCB *)opaque;
aliguori59a703e2009-02-05 21:23:58 +0000101
102 dbs->bh = qemu_bh_new(reschedule_dma, dbs);
103 qemu_bh_schedule(dbs->bh);
104}
105
aliguori7403b142009-03-28 16:11:25 +0000106static void dma_bdrv_unmap(DMAAIOCB *dbs)
aliguori59a703e2009-02-05 21:23:58 +0000107{
aliguori59a703e2009-02-05 21:23:58 +0000108 int i;
109
aliguori59a703e2009-02-05 21:23:58 +0000110 for (i = 0; i < dbs->iov.niov; ++i) {
David Gibsonc65bcef2012-06-27 14:50:40 +1000111 dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
112 dbs->iov.iov[i].iov_len, dbs->dir,
113 dbs->iov.iov[i].iov_len);
aliguori59a703e2009-02-05 21:23:58 +0000114 }
Paolo Bonzinic3adb5b2011-09-16 16:40:02 +0200115 qemu_iovec_reset(&dbs->iov);
116}
117
/*
 * Finish a scatter/gather request (successfully or with error @ret):
 * unmap any still-mapped fragments, run the caller's completion
 * callback, tear down per-request state and release the AIOCB unless a
 * cancel is currently in progress.
 */
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    /* The callback may be NULL when completing from dma_aio_cancel(). */
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}
138
/*
 * Per-chunk completion callback, also used to kick off the first chunk
 * (called with ret == 0 and an empty iovec).  Maps as many remaining
 * scatter/gather entries as possible into dbs->iov and submits the next
 * block-layer request, or completes the whole transfer once the list is
 * exhausted or an error is reported.
 */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    /* Account for the chunk that just finished (512-byte sectors). */
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    /* Map as much of the rest of the s/g list as possible in one pass;
     * dma_memory_map() may shorten cur_len or fail outright. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped at all: wait until a bounce buffer is
         * freed, then retry from a bottom half. */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
180
Christoph Hellwigc16b5a22009-05-25 12:37:32 +0200181static void dma_aio_cancel(BlockDriverAIOCB *acb)
182{
183 DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
184
Kevin Wolfc57c4652011-11-24 06:15:28 -0500185 trace_dma_aio_cancel(dbs);
186
Christoph Hellwigc16b5a22009-05-25 12:37:32 +0200187 if (dbs->acb) {
Paolo Bonzinic3adb5b2011-09-16 16:40:02 +0200188 BlockDriverAIOCB *acb = dbs->acb;
189 dbs->acb = NULL;
190 dbs->in_cancel = true;
191 bdrv_aio_cancel(acb);
192 dbs->in_cancel = false;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +0200193 }
Paolo Bonzinic3adb5b2011-09-16 16:40:02 +0200194 dbs->common.cb = NULL;
195 dma_complete(dbs, 0);
Christoph Hellwigc16b5a22009-05-25 12:37:32 +0200196}
197
/* AIOCB implementation descriptor for scatter/gather DMA requests. */
static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};
202
Christoph Hellwigcb144cc2011-05-19 10:57:59 +0200203BlockDriverAIOCB *dma_bdrv_io(
aliguori59a703e2009-02-05 21:23:58 +0000204 BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
Christoph Hellwigcb144cc2011-05-19 10:57:59 +0200205 DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
David Gibson43cf8ae2012-03-27 13:42:23 +1100206 void *opaque, DMADirection dir)
aliguori59a703e2009-02-05 21:23:58 +0000207{
Stefan Hajnoczid7331be2012-10-31 16:34:37 +0100208 DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);
aliguori59a703e2009-02-05 21:23:58 +0000209
David Gibson43cf8ae2012-03-27 13:42:23 +1100210 trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));
Kevin Wolfc57c4652011-11-24 06:15:28 -0500211
aliguori37b78422009-03-20 18:26:16 +0000212 dbs->acb = NULL;
aliguori59a703e2009-02-05 21:23:58 +0000213 dbs->bs = bs;
aliguori59a703e2009-02-05 21:23:58 +0000214 dbs->sg = sg;
215 dbs->sector_num = sector_num;
216 dbs->sg_cur_index = 0;
217 dbs->sg_cur_byte = 0;
David Gibson43cf8ae2012-03-27 13:42:23 +1100218 dbs->dir = dir;
Christoph Hellwigcb144cc2011-05-19 10:57:59 +0200219 dbs->io_func = io_func;
aliguori59a703e2009-02-05 21:23:58 +0000220 dbs->bh = NULL;
221 qemu_iovec_init(&dbs->iov, sg->nsg);
222 dma_bdrv_cb(dbs, 0);
aliguori37b78422009-03-20 18:26:16 +0000223 return &dbs->common;
aliguori59a703e2009-02-05 21:23:58 +0000224}
225
226
/*
 * Convenience wrapper: asynchronous read from @bs into the guest memory
 * described by @sg, starting at @sector (512-byte units).
 */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}
234
/*
 * Convenience wrapper: asynchronous write to @bs from the guest memory
 * described by @sg, starting at @sector (512-byte units).
 */
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
Paolo Bonzini8171ee32011-07-06 08:02:14 +0200242
243
David Gibsonc65bcef2012-06-27 14:50:40 +1000244static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
245 DMADirection dir)
Paolo Bonzini8171ee32011-07-06 08:02:14 +0200246{
247 uint64_t resid;
248 int sg_cur_index;
249
250 resid = sg->size;
251 sg_cur_index = 0;
252 len = MIN(len, resid);
253 while (len > 0) {
254 ScatterGatherEntry entry = sg->sg[sg_cur_index++];
255 int32_t xfer = MIN(len, entry.len);
David Gibsonc65bcef2012-06-27 14:50:40 +1000256 dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
Paolo Bonzini8171ee32011-07-06 08:02:14 +0200257 ptr += xfer;
258 len -= xfer;
259 resid -= xfer;
260 }
261
262 return resid;
263}
264
/* Read from the s/g-described guest memory into @ptr; returns the residual. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
269
/* Write @ptr into the s/g-described guest memory; returns the residual. */
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
Paolo Bonzini84a69352011-09-05 14:20:29 +0200274
/*
 * Start block-layer accounting for a transfer whose size is the total
 * length of the scatter/gather list.
 */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
David Gibsone5332e62012-06-27 14:50:43 +1000280
281bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
282 DMADirection dir)
283{
Avi Kivitya8170e52012-10-23 12:30:10 +0200284 hwaddr paddr, plen;
David Gibsone5332e62012-06-27 14:50:43 +1000285
286#ifdef DEBUG_IOMMU
287 fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
288 " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
289#endif
290
291 while (len) {
292 if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
293 return false;
294 }
295
296 /* The translation might be valid for larger regions. */
297 if (plen > len) {
298 plen = len;
299 }
300
301 len -= plen;
302 addr += plen;
303 }
304
305 return true;
306}
307
308int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
309 void *buf, dma_addr_t len, DMADirection dir)
310{
Avi Kivitya8170e52012-10-23 12:30:10 +0200311 hwaddr paddr, plen;
David Gibsone5332e62012-06-27 14:50:43 +1000312 int err;
313
314#ifdef DEBUG_IOMMU
315 fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
316 DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
317#endif
318
319 while (len) {
320 err = dma->translate(dma, addr, &paddr, &plen, dir);
321 if (err) {
322 /*
323 * In case of failure on reads from the guest, we clean the
324 * destination buffer so that a device that doesn't test
325 * for errors will not expose qemu internal memory.
326 */
327 memset(buf, 0, len);
328 return -1;
329 }
330
331 /* The translation might be valid for larger regions. */
332 if (plen > len) {
333 plen = len;
334 }
335
Avi Kivityb90600e2012-10-03 16:42:37 +0200336 address_space_rw(dma->as, paddr, buf, plen, dir == DMA_DIRECTION_FROM_DEVICE);
David Gibsone5332e62012-06-27 14:50:43 +1000337
338 len -= plen;
339 addr += plen;
340 buf += plen;
341 }
342
343 return 0;
344}
345
346int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
347 dma_addr_t len)
348{
Avi Kivitya8170e52012-10-23 12:30:10 +0200349 hwaddr paddr, plen;
David Gibsone5332e62012-06-27 14:50:43 +1000350 int err;
351
352#ifdef DEBUG_IOMMU
353 fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
354 " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
355#endif
356
357 while (len) {
358 err = dma->translate(dma, addr, &paddr, &plen,
359 DMA_DIRECTION_FROM_DEVICE);
360 if (err) {
361 return err;
362 }
363
364 /* The translation might be valid for larger regions. */
365 if (plen > len) {
366 plen = len;
367 }
368
Avi Kivityb90600e2012-10-03 16:42:37 +0200369 do_dma_memory_set(dma->as, paddr, c, plen);
David Gibsone5332e62012-06-27 14:50:43 +1000370
371 len -= plen;
372 addr += plen;
373 }
374
375 return 0;
376}
377
Avi Kivityb90600e2012-10-03 16:42:37 +0200378void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
David Gibsone5332e62012-06-27 14:50:43 +1000379 DMAMapFunc map, DMAUnmapFunc unmap)
380{
381#ifdef DEBUG_IOMMU
382 fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
383 dma, translate, map, unmap);
384#endif
Avi Kivityb90600e2012-10-03 16:42:37 +0200385 dma->as = as;
David Gibsone5332e62012-06-27 14:50:43 +1000386 dma->translate = translate;
387 dma->map = map;
388 dma->unmap = unmap;
389}
390
391void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
392 DMADirection dir)
393{
394 int err;
Avi Kivitya8170e52012-10-23 12:30:10 +0200395 hwaddr paddr, plen;
David Gibsone5332e62012-06-27 14:50:43 +1000396 void *buf;
397
398 if (dma->map) {
399 return dma->map(dma, addr, len, dir);
400 }
401
402 plen = *len;
403 err = dma->translate(dma, addr, &paddr, &plen, dir);
404 if (err) {
405 return NULL;
406 }
407
408 /*
409 * If this is true, the virtual region is contiguous,
410 * but the translated physical region isn't. We just
Avi Kivityb90600e2012-10-03 16:42:37 +0200411 * clamp *len, much like address_space_map() does.
David Gibsone5332e62012-06-27 14:50:43 +1000412 */
413 if (plen < *len) {
414 *len = plen;
415 }
416
Avi Kivityb90600e2012-10-03 16:42:37 +0200417 buf = address_space_map(dma->as, paddr, &plen, dir == DMA_DIRECTION_FROM_DEVICE);
David Gibsone5332e62012-06-27 14:50:43 +1000418 *len = plen;
419
420 return buf;
421}
422
423void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
424 DMADirection dir, dma_addr_t access_len)
425{
426 if (dma->unmap) {
427 dma->unmap(dma, buffer, len, dir, access_len);
428 return;
429 }
430
Avi Kivityb90600e2012-10-03 16:42:37 +0200431 address_space_unmap(dma->as, buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
432 access_len);
David Gibsone5332e62012-06-27 14:50:43 +1000433
434}