/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace-root.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}
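
/*
 * Usage sketch (hypothetical caller, not part of this file): a device
 * model zero-filling a 4 KiB guest buffer.  "desc_addr" is an assumed
 * guest address; address_space_memory is the system address space.
 *
 *     if (dma_memory_set(&address_space_memory, desc_addr, 0, 4096)) {
 *         // the access faulted; signal a DMA error to the guest
 *     }
 *
 * The fixed 512-byte fillbuf bounds stack usage: the fill is issued in
 * chunks of at most FILLBUF_SIZE bytes per address_space_rw() call.
 */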

void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
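
/*
 * Lifecycle sketch for the three helpers above (hypothetical device
 * code; "dev" and "desc" are assumptions for illustration):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(dev), 2, &address_space_memory);
 *     qemu_sglist_add(&qsg, desc.addr, desc.len);   // grows 2n+1 as needed
 *     ...                                           // e.g. dma_blk_read()
 *     qemu_sglist_destroy(&qsg);
 *
 * qemu_sglist_init() takes a reference on @dev so the device cannot go
 * away while the list is live; qemu_sglist_destroy() drops it and
 * poisons the struct with memset().
 */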
70
aliguori59a703e2009-02-05 21:23:58 +000071typedef struct {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +020072 BlockAIOCB common;
Paolo Bonzini8a8e63e2016-05-23 14:54:06 +020073 AioContext *ctx;
Markus Armbruster7c84b1b2014-10-07 13:59:14 +020074 BlockAIOCB *acb;
aliguori59a703e2009-02-05 21:23:58 +000075 QEMUSGList *sg;
Mark Cave-Ayland99868af2016-10-27 16:29:13 -040076 uint32_t align;
Eric Blaked4f510e2016-05-06 10:26:31 -060077 uint64_t offset;
David Gibson43cf8ae2012-03-27 13:42:23 +110078 DMADirection dir;
aliguori59a703e2009-02-05 21:23:58 +000079 int sg_cur_index;
David Gibsond3231182011-10-31 17:06:46 +110080 dma_addr_t sg_cur_byte;
aliguori59a703e2009-02-05 21:23:58 +000081 QEMUIOVector iov;
82 QEMUBH *bh;
Christoph Hellwigcb144cc2011-05-19 10:57:59 +020083 DMAIOFunc *io_func;
Paolo Bonzini8a8e63e2016-05-23 14:54:06 +020084 void *io_func_opaque;
aliguori37b78422009-03-20 18:26:16 +000085} DMAAIOCB;
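
/*
 * One DMAAIOCB tracks one scatter/gather request.  dma_blk_cb() below
 * drives it as a state machine: map as much of the remaining sg list
 * as possible into iov, submit a single I/O through io_func, then on
 * completion unmap, advance offset, and repeat until the list is
 * consumed or an error is reported.  Per the asserts below, at most
 * one of acb (I/O in flight) and bh (waiting to retry a failed
 * dma_memory_map()) is non-NULL at any time.
 */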

static void dma_blk_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    aio_context_acquire(dbs->ctx);
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    aio_context_release(dbs->ctx);
    assert(dbs->acb);
}
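
/*
 * Note on the empty-iov path above: dma_memory_map() can fail
 * transiently (for instance, when the target is not guest RAM and the
 * shared bounce buffer is already in use; an assumption based on the
 * generic memory API, not spelled out in this file).  Registering
 * dbs->bh with cpu_register_map_client() arranges for reschedule_dma()
 * to run once mapping resources free up, so the request resumes
 * instead of failing outright.
 */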

static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb.  */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}
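
/*
 * Cancellation takes one of two paths: with an I/O in flight (acb set)
 * it defers to blk_aio_cancel_async() and lets completion flow back
 * through dma_blk_cb(); when parked waiting for map resources (bh set)
 * it tears the bottom half down here and reports -ECANCELED directly.
 */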

static AioContext *dma_get_aio_context(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    return dbs->ctx;
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel_async       = dma_aio_cancel,
    .get_aio_context    = dma_get_aio_context,
};

BlockAIOCB *dma_blk_io(AioContext *ctx,
    QEMUSGList *sg, uint64_t offset, uint32_t align,
    DMAIOFunc *io_func, void *io_func_opaque,
    BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}
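
/*
 * Note that dma_blk_io() starts the transfer synchronously: the
 * initial dma_blk_cb(dbs, 0) call already maps the head of the sg
 * list and submits the first I/O, so a request may be in flight by
 * the time the BlockAIOCB is returned to the caller.
 */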


static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}
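
/*
 * Usage sketch (hypothetical device code, not from this file; "s" and
 * its fields are assumptions):
 *
 *     s->acb = dma_blk_read(s->blk, &s->sg,
 *                           sector_num * BDRV_SECTOR_SIZE,
 *                           BDRV_SECTOR_SIZE, my_dma_complete_cb, s);
 *
 * The offset is in bytes; the transfer length is implied by sg->size.
 */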

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}


static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
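
/*
 * dma_buf_read()/dma_buf_write() are the synchronous counterparts of
 * dma_blk_read()/dma_blk_write(): they copy inline between a linear
 * buffer and the sg list and return the residual bytes of the sg list
 * that were not transferred (0 when @len covers the whole list).
 * Sketch, with "buf" and "sg" supplied by the caller:
 *
 *     uint64_t resid = dma_buf_read(buf, sizeof(buf), &sg);
 *     if (resid) {
 *         // buf was too small to cover the whole sg list
 *     }
 */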

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}