/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

/* Fill @len bytes of guest memory at @addr in @as with the byte @c,
 * bouncing through a small on-stack buffer one chunk at a time. */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

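/*
 * Usage sketch (hypothetical caller): zero a 4 KiB guest buffer through
 * the device's address space and propagate any DMA error.  The address
 * space and guest address are assumptions for the example.
 */
static inline int example_zero_buffer(AddressSpace *as, dma_addr_t buf)
{
    /* dma_memory_set() returns non-zero if any chunk failed to write. */
    return dma_memory_set(as, buf, 0, 4096);
}
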
/* Initialize a scatter/gather list.  @alloc_hint preallocates that many
 * entries; the list takes a reference on @dev so the device stays alive
 * while DMA is in flight. */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

/* Append one mapping to the list, growing the entry array geometrically
 * (2n + 1) when it is full. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

/* Release the entry array and drop the device reference taken at init. */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

/* State of one scatter/gather block I/O request in flight. */
typedef struct {
    BlockAIOCB common;
    BlockBackend *blk;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_blk_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        blk_aio_cancel_async(dbs->acb);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size   = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
};

/*
 * Start an asynchronous scatter/gather I/O: map as much of @sg as
 * possible, submit via @io_func, and keep going from the completion
 * callback until the whole list has been transferred.
 */
BlockAIOCB *dma_blk_io(
    BlockBackend *blk, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = blk_aio_get(&dma_aiocb_info, blk, cb, opaque);

    trace_dma_blk_io(dbs, blk, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->blk = blk;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

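/*
 * Sketch of a custom DMAIOFunc (hypothetical, for illustration): a device
 * can pass its own submit hook to dma_blk_io() instead of the plain
 * blk_aio_readv()/blk_aio_writev() used by the wrappers below.
 */
static inline BlockAIOCB *example_submit_readv(BlockBackend *blk,
                                               int64_t sector_num,
                                               QEMUIOVector *iov,
                                               int nb_sectors,
                                               BlockCompletionFunc *cb,
                                               void *opaque)
{
    /* A real hook might log or account the request here first. */
    return blk_aio_readv(blk, sector_num, iov, nb_sectors, cb, opaque);
}
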
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

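/*
 * Usage sketch (hypothetical device emulation): start a DMA read of the
 * guest buffer described by @qsg from sector 0 of the backend.  The
 * completion callback and opaque pointer are assumptions for the example.
 */
static inline BlockAIOCB *example_start_read(BlockBackend *blk,
                                             QEMUSGList *qsg,
                                             BlockCompletionFunc *cb,
                                             void *opaque)
{
    return dma_blk_read(blk, qsg, 0, cb, opaque);
}
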
/* Copy between a bounce buffer and the guest memory described by @sg;
 * returns the number of bytes of @sg that were NOT transferred. */
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

/* dma_buf_read() copies @ptr into guest memory (data flowing from the
 * device, as for a guest read request); dma_buf_write() fetches guest
 * memory into @ptr. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

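/*
 * Usage sketch (hypothetical device): return a fixed 36-byte response
 * held in a bounce buffer to the guest buffer described by @sg.  The
 * payload size is an assumption for the example.
 */
static inline uint64_t example_return_response(QEMUSGList *sg)
{
    uint8_t response[36] = { 0 };
    /* Returns how many bytes of @sg were left unfilled. */
    return dma_buf_read(response, sizeof(response), sg);
}
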
/* Begin I/O accounting for a scatter/gather request of @sg->size bytes. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}
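
/*
 * Usage sketch (hypothetical request state): account a read before
 * submitting it; the completion path would pair this with
 * block_acct_done(blk_get_stats(blk), cookie).
 */
static inline void example_account_read(BlockBackend *blk,
                                        BlockAcctCookie *cookie,
                                        QEMUSGList *sg)
{
    dma_acct_start(blk, cookie, sg, BLOCK_ACCT_READ);
}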