/*
 * DMA helper functions
 *
 * Copyright (c) 2009,2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "system/block-backend.h"
#include "system/dma.h"
#include "trace.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "system/cpu-timers.h"
#include "qemu/range.h"

/* #define DEBUG_IOMMU */

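/*
 * Fill @len bytes of guest memory at @addr in address space @as with the
 * byte @c.  A DMA barrier is issued first, as for a device-to-memory
 * transfer.
 */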
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

    return address_space_set(as, addr, c, len, attrs);
}

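/*
 * Initialise @qsg with room for @alloc_hint scatter/gather entries, to be
 * accessed through address space @as.  A reference is taken on @dev so that
 * the owning device cannot disappear while the list is in use.
 */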
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_new(ScatterGatherEntry, alloc_hint);
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

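/*
 * Append the guest memory range [@base, @base + @len) to @qsg, growing the
 * entry array geometrically when it is full.
 */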
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_renew(ScatterGatherEntry, qsg->sg, qsg->nalloc);
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

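/*
 * Free the entry array, drop the device reference taken by
 * qemu_sglist_init() and clear @qsg.
 */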
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

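/*
 * Typical usage sketch (illustrative only, not part of this file): a device
 * model builds a QEMUSGList from guest-provided descriptors and hands it to
 * dma_blk_read()/dma_blk_write().  The descriptor array, device pointers and
 * completion callback below are hypothetical.
 *
 *     QEMUSGList sg;
 *     int i;
 *
 *     qemu_sglist_init(&sg, DEVICE(dev), num_descs,
 *                      pci_get_address_space(pci_dev));
 *     for (i = 0; i < num_descs; i++) {
 *         qemu_sglist_add(&sg, desc[i].addr, desc[i].len);
 *     }
 *     dma_blk_read(blk, &sg, offset, BDRV_SECTOR_SIZE, my_dma_complete, dev);
 *     ...
 *     qemu_sglist_destroy(&sg);       (once the request has completed)
 */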
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

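/* Unmap every currently mapped iovec element and empty the vector. */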
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

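/*
 * Final completion path: unmap any remaining mappings, invoke the caller's
 * completion callback with @ret and release the AIOCB.
 */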
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

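/*
 * Core of the scatter/gather I/O loop.  Called once from dma_blk_io() to
 * start the transfer and then as the completion callback of each chunk:
 * it unmaps the previous chunk, maps as many further SG entries as possible
 * into dbs->iov and either submits the next chunk through dbs->io_func,
 * waits (via a bottom half registered as a map client) for map space to
 * become available, or completes the whole request.
 */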
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    AioContext *ctx = dbs->ctx;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */
    assert(ctx == qemu_get_current_aio_context());

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
                             MEMTXATTRS_UNSPECIFIED);
        /*
         * Make reads deterministic in icount mode.  Windows sometimes issues
         * disk read requests with overlapping SG elements, which leads to
         * non-determinism because the resulting buffer contents may be mixed
         * from several sectors.  This code splits the SG list into several
         * groups such that the elements within each group do not overlap.
         */
        if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            int i;
            for (i = 0 ; i < dbs->iov.niov ; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
        address_space_register_map_client(dbs->sg->as, dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    assert(dbs->acb);
}

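/*
 * Cancellation: if a chunk is currently in flight, cancel it and let
 * dma_blk_cb() finish the request; otherwise drop any pending bottom half
 * registered as a map client and report -ECANCELED to the caller.
 */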
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb. */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        address_space_unregister_map_client(dbs->sg->as, dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel_async       = dma_aio_cancel,
};

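/*
 * Start a scatter/gather transfer described by @sg: allocate the AIOCB,
 * record the parameters and run the first iteration of dma_blk_cb()
 * synchronously.  @io_func is invoked with @io_func_opaque for every mapped
 * chunk, and @cb is called once the whole transfer has finished.
 */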
BlockAIOCB *dma_blk_io(AioContext *ctx,
    QEMUSGList *sg, uint64_t offset, uint32_t align,
    DMAIOFunc *io_func, void *io_func_opaque,
    BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}


static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

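/*
 * Copy up to @len bytes between the linear buffer @buf and the guest memory
 * described by @sg, one ScatterGatherEntry at a time.  On return, @residual
 * (if non-NULL) holds the number of bytes of @sg that were not transferred.
 */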
static MemTxResult dma_buf_rw(void *buf, dma_addr_t len, dma_addr_t *residual,
                              QEMUSGList *sg, DMADirection dir,
                              MemTxAttrs attrs)
{
    uint8_t *ptr = buf;
    dma_addr_t xresidual;
    int sg_cur_index;
    MemTxResult res = MEMTX_OK;

    xresidual = sg->size;
    sg_cur_index = 0;
    len = MIN(len, xresidual);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        dma_addr_t xfer = MIN(len, entry.len);
        res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
        ptr += xfer;
        len -= xfer;
        xresidual -= xfer;
    }

    if (residual) {
        *residual = xresidual;
    }
    return res;
}

MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
                         QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
}

MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
                          QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_TO_DEVICE, attrs);
}

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}

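/*
 * Return a power-of-two-minus-one mask for the largest naturally aligned
 * region that starts at @start, does not extend past @end, and fits within
 * @max_addr_bits bits: the result is limited both by the alignment of
 * @start and by the size of [@start, @end].
 */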
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end, int max_addr_bits)
{
    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;
    uint64_t alignment_mask, size_mask;

    if (max_addr_bits != 64) {
        max_mask = (1ULL << max_addr_bits) - 1;
    }

    alignment_mask = start ? (start & -start) - 1 : max_mask;
    alignment_mask = MIN(alignment_mask, max_mask);
    size_mask = MIN(addr_mask, max_mask);

    if (alignment_mask <= size_mask) {
        /* Increase the alignment of start */
        return alignment_mask;
    } else {
        /* Find the largest page mask from size */
        if (addr_mask == UINT64_MAX) {
            return UINT64_MAX;
        }
        return (1ULL << (63 - clz64(addr_mask + 1))) - 1;
    }
}