/*
 * DMA helper functions
 *
 * Copyright (c) 2009,2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "system/block-backend.h"
#include "system/dma.h"
#include "trace.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "system/cpu-timers.h"
#include "qemu/range.h"

/* #define DEBUG_IOMMU */

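/*
 * Fill @len bytes at @addr in address space @as with the constant byte @c.
 * A DMA barrier is issued first; the MemTxResult of the underlying
 * address_space_set() call is returned.
 */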
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

    return address_space_set(as, addr, c, len, attrs);
}

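/*
 * Initialize @qsg with room for @alloc_hint entries.  A reference is taken
 * on @dev so the owning device stays alive while the list exists; it is
 * released by qemu_sglist_destroy().
 */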
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_new(ScatterGatherEntry, alloc_hint);
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

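/* Append a (base, len) entry to @qsg, growing the entry array as needed. */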
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_renew(ScatterGatherEntry, qsg->sg, qsg->nalloc);
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

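/* Free the entry array, drop the device reference and zero out @qsg. */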
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

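/*
 * Per-request state for scatter/gather block I/O.  dma_blk_cb() maps as
 * many scatter/gather entries as possible into @iov, submits the batch
 * through @io_func, and is re-invoked on completion until the whole
 * QEMUSGList has been transferred.
 */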
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

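/*
 * Bottom half run when the address space notifies its registered map
 * clients, i.e. when dma_memory_map() may be able to make progress again;
 * it resumes the transfer where dma_blk_cb() left off.
 */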
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

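/* Unmap every buffer currently held in dbs->iov and reset the iovec. */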
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

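/*
 * Finish the request: unmap any remaining buffers, invoke the caller's
 * completion callback with @ret and release the AIOCB.
 */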
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

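/*
 * Core of the scatter/gather state machine.  Each invocation unmaps the
 * previous batch, maps as many further scatter/gather entries as it can,
 * and submits the resulting iovec through dbs->io_func with itself as the
 * completion callback.  If nothing could be mapped, a bottom half is
 * registered as a map client so the transfer resumes once map resources
 * become available again.
 */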
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    AioContext *ctx = dbs->ctx;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */
    assert(ctx == qemu_get_current_aio_context());

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
                             MEMTXATTRS_UNSPECIFIED);
        /*
         * Make reads deterministic in icount mode.  Windows sometimes issues
         * disk read requests with overlapping SGs, which leads to
         * non-determinism because the resulting buffer contents may be mixed
         * from several sectors.  This code splits all SGs into several
         * groups; the SGs within each group do not overlap.
         */
        if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            int i;
            for (i = 0 ; i < dbs->iov.niov ; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
        address_space_register_map_client(dbs->sg->as, dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    assert(dbs->acb);
}

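/*
 * Cancel an in-flight request.  If block-layer I/O is pending, its
 * asynchronous cancellation completes through dma_blk_cb(); otherwise any
 * pending reschedule BH is dropped and the request completes with
 * -ECANCELED.
 */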
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb.  */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        address_space_unregister_map_client(dbs->sg->as, dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size   = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
};

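/*
 * Start a scatter/gather transfer described by @sg.  @io_func is invoked
 * with the current byte @offset and the batch of mapped buffers; batches
 * whose size is not a multiple of @align are trimmed before submission.
 * The returned AIOCB completes through @cb once the whole list has been
 * transferred, or earlier on error or cancellation.
 */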
BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb,
                       void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}


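/*
 * Convenience wrappers for the common case of targeting a BlockBackend:
 * the io_func adapters below forward to blk_aio_preadv()/blk_aio_pwritev()
 * with the BlockBackend passed through the opaque pointer.
 */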
static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}


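/*
 * Copy up to @len bytes between the linear buffer @buf and the guest
 * memory described by @sg, one scatter/gather entry at a time.  The number
 * of bytes of @sg left untransferred is stored in @residual (if non-NULL);
 * the MemTxResult flags of the individual accesses are OR-ed together.
 */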
static MemTxResult dma_buf_rw(void *buf, dma_addr_t len, dma_addr_t *residual,
                              QEMUSGList *sg, DMADirection dir,
                              MemTxAttrs attrs)
{
    uint8_t *ptr = buf;
    dma_addr_t xresidual;
    int sg_cur_index;
    MemTxResult res = MEMTX_OK;

    xresidual = sg->size;
    sg_cur_index = 0;
    len = MIN(len, xresidual);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        dma_addr_t xfer = MIN(len, entry.len);
        res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
        ptr += xfer;
        len -= xfer;
        xresidual -= xfer;
    }

    if (residual) {
        *residual = xresidual;
    }
    return res;
}

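/*
 * Directional wrappers: dma_buf_read() copies @ptr into the guest memory
 * described by @sg (device-to-memory), dma_buf_write() copies guest memory
 * into @ptr (memory-to-device).
 */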
MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
                         QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
}

MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
                          QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_TO_DEVICE, attrs);
}

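/* Start block accounting for a transfer covering the whole of @sg. */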
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}

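/*
 * Return a power-of-two-minus-one mask for the largest naturally aligned
 * power-of-two region that begins at @start and fits within [@start, @end],
 * capped at @max_addr_bits address bits.  Illustrative example: for
 * start = 0x1000 and end = 0x10000 this returns 0xfff, since 0x1000 is
 * only 4 KiB aligned.
 */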
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end, int max_addr_bits)
{
    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;
    uint64_t alignment_mask, size_mask;

    if (max_addr_bits != 64) {
        max_mask = (1ULL << max_addr_bits) - 1;
    }

    alignment_mask = start ? (start & -start) - 1 : max_mask;
    alignment_mask = MIN(alignment_mask, max_mask);
    size_mask = MIN(addr_mask, max_mask);

    if (alignment_mask <= size_mask) {
        /* Increase the alignment of start */
        return alignment_mask;
    } else {
        /* Find the largest page mask from size */
        if (addr_mask == UINT64_MAX) {
            return UINT64_MAX;
        }
        return (1ULL << (63 - clz64(addr_mask + 1))) - 1;
    }
}