/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "range.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */

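/*
 * Fill a guest-physical range with a constant byte.  The range can be
 * arbitrarily large, so it is written through a small on-stack bounce
 * buffer in FILLBUF_SIZE chunks instead of allocating 'len' bytes.
 */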
static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

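/*
 * Set 'len' bytes of guest memory at 'addr' to the constant byte 'c',
 * going through the IOMMU translation path when the DMAContext has one.
 * The dma_barrier() call keeps this access ordered with respect to
 * earlier DMA, mirroring the other dma_memory_*() helpers.
 */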
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}

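/*
 * QEMUSGList helpers.  A sketch of a typical caller (names hypothetical):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dma);
 *     qemu_sglist_add(&qsg, desc_base, desc_len);
 *     ... hand qsg to dma_bdrv_read()/dma_bdrv_write() ...
 *     qemu_sglist_destroy(&qsg);
 *
 * 'alloc_hint' only sizes the initial entry array; qemu_sglist_add()
 * grows it on demand.
 */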
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

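/*
 * Append one (base, len) entry, growing the array geometrically
 * (2n + 1) so that repeated adds stay amortized O(1).
 */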
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

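/*
 * State for one scatter/gather block transfer.  sg_cur_index/sg_cur_byte
 * record how far into the QEMUSGList the request has progressed, since a
 * single bdrv call may cover only the prefix that could be mapped.
 */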
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

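/*
 * If dma_memory_map() cannot map anything, dma_bdrv_cb() registers
 * continue_after_map_failure() as a map client; once mappings are
 * released it schedules a bottom half that restarts the transfer.
 */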
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

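/*
 * Tear down a transfer: unmap any residual mappings, deliver the final
 * return code to the caller's completion callback, and release the AIOCB
 * unless dma_aio_cancel() still holds a reference to it.
 */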
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}

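/*
 * Main loop, re-entered after each bdrv completion: map as much of the
 * remaining scatter/gather list as possible into dbs->iov, then issue
 * one io_func call for the mapped region (512-byte sectors assumed).
 */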
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel     = dma_aio_cancel,
};

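/*
 * Start a scatter/gather block transfer.  A sketch of a read through the
 * generic entry point (equivalent to dma_bdrv_read() below; the callback
 * and opaque names are hypothetical):
 *
 *     acb = dma_bdrv_io(bs, sg, sector_num, bdrv_aio_readv,
 *                       my_complete_cb, my_opaque,
 *                       DMA_DIRECTION_FROM_DEVICE);
 */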
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

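/*
 * Copy between a linear buffer and a scatter/gather list, in the given
 * direction, and return the number of bytes of 'sg' that were not
 * transferred.
 */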
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

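/*
 * Walk 'len' bytes of IOMMU-translated DMA space and report whether every
 * chunk translates successfully for 'dir', without touching the data.
 */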
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    target_phys_addr_t paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

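/*
 * Wire up a DMAContext.  'map'/'unmap' may be NULL, in which case
 * iommu_dma_memory_map()/unmap() below fall back to 'translate' plus
 * cpu_physical_memory_map().  A minimal sketch (hypothetical names):
 *
 *     static int my_translate(DMAContext *dma, dma_addr_t addr,
 *                             target_phys_addr_t *paddr,
 *                             target_phys_addr_t *len, DMADirection dir);
 *
 *     dma_context_init(&ctx, my_translate, NULL, NULL);
 */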
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    cpu_physical_memory_unmap(buffer, len,
                              dir == DMA_DIRECTION_FROM_DEVICE,
                              access_len);
}