/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace/trace-root.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "qemu/range.h"

/* #define DEBUG_IOMMU */

int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                     fillbuf, l);
        len -= l;
        addr += l;
    }

    return error;
}

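/*
 * Example (illustrative sketch, not part of the original file): zeroing a
 * 4 KiB region of guest memory from a device model.  The base address is an
 * assumption made for the example; address_space_memory is the system
 * address space.
 *
 *     dma_addr_t base = 0x10000;   // hypothetical guest-physical address
 *
 *     if (dma_memory_set(&address_space_memory, base, 0, 4096)) {
 *         // the access faulted; report a DMA error to the guest
 *     }
 */
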
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

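/*
 * Example (illustrative sketch, not part of the original file): building a
 * two-entry scatter/gather list for a DMA-capable device.  'dev', 'as' and
 * the guest addresses are assumptions made for the example.
 *
 *     QEMUSGList sg;
 *
 *     qemu_sglist_init(&sg, DEVICE(dev), 2, as);
 *     qemu_sglist_add(&sg, 0x1000, 512);     // first segment
 *     qemu_sglist_add(&sg, 0x3000, 512);     // second segment
 *     ... hand 'sg' to dma_blk_read()/dma_blk_write() ...
 *     qemu_sglist_destroy(&sg);              // drops the reference on 'dev'
 */
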
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

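/*
 * Added commentary: dma_blk_cb() is both the entry point and the completion
 * callback of a scatter/gather transfer.  Each invocation unmaps the
 * previous batch of mappings, maps as many further SG entries as possible
 * into dbs->iov, and submits one more request through dbs->io_func.  If
 * nothing could be mapped (e.g. the bounce buffer is exhausted), it
 * registers a map client and retries from reschedule_dma() once mapping
 * resources become available.
 */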
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        /*
         * Make reads deterministic in icount mode.  Windows sometimes issues
         * disk read requests with overlapping SGs.  This leads to
         * non-determinism, because the resulting buffer contents may be mixed
         * from several sectors.  This code splits all SGs into several
         * groups; the SGs within each group do not overlap.
         */
        if (mem && use_icount && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            int i;
            for (i = 0 ; i < dbs->iov.niov ; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    aio_context_acquire(dbs->ctx);
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    aio_context_release(dbs->ctx);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb.  */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static AioContext *dma_get_aio_context(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    return dbs->ctx;
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel_async       = dma_aio_cancel,
    .get_aio_context    = dma_get_aio_context,
};

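/*
 * Added commentary: dma_blk_io() starts an asynchronous transfer between a
 * scatter/gather list and a block backend.  'io_func' performs the actual
 * I/O on each mapped slice (see the read/write wrappers below); 'offset' is
 * the starting byte offset on the block-backend side, and each submitted
 * request is truncated to a multiple of 'align' bytes.
 */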
BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb,
                       void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}


static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}


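/*
 * Example (illustrative sketch, not part of the original file): how a device
 * model might start a DMA read from its BlockBackend into guest memory.  The
 * device struct, its completion callback and the BDRV_SECTOR_SIZE alignment
 * are assumptions made for the example.
 *
 *     static void my_dev_dma_done(void *opaque, int ret)
 *     {
 *         MyDevState *s = opaque;            // hypothetical device state
 *         qemu_sglist_destroy(&s->sg);       // release the SG list
 *         my_dev_complete_request(s, ret);   // hypothetical completion path
 *     }
 *
 *     s->aiocb = dma_blk_read(s->blk, &s->sg, offset, BDRV_SECTOR_SIZE,
 *                             my_dev_dma_done, s);
 */
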
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
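
/*
 * Example (illustrative sketch, not part of the original file): delivering a
 * small, bounce-buffered reply structure into guest memory described by a
 * QEMUSGList.  'MyReply' and 'sg' are assumptions made for the example; the
 * return value is the number of bytes of the SG list left untransferred.
 *
 *     MyReply reply = { 0 };                 // hypothetical reply struct
 *     uint64_t resid = dma_buf_read((uint8_t *)&reply, sizeof(reply), &sg);
 *     if (resid) {
 *         // the SG list was larger than sizeof(reply)
 *     }
 */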

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}