/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "hw/hw.h"
#include "block.h"
#include "kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

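/*
 * Directions are named from the device's point of view:
 * DMA_DIRECTION_TO_DEVICE moves data from memory to the device (the
 * device reads memory), DMA_DIRECTION_FROM_DEVICE moves data from the
 * device into memory (the device writes memory).  This is why
 * dma_memory_read() below passes DMA_DIRECTION_TO_DEVICE, and why
 * FROM_DEVICE maps to is_write in cpu_physical_memory_rw().
 */
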
struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#if defined(TARGET_PHYS_ADDR_BITS)

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             target_phys_addr_t *paddr,
                             target_phys_addr_t *len,
                             DMADirection dir);
typedef void *DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

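/*
 * A bus that sits behind an IOMMU fills in these callbacks and hands
 * devices the resulting context.  A minimal sketch, with hypothetical
 * names (my_iommu_translate and its lookup are illustrative only; this
 * header guarantees nothing beyond the signatures above):
 *
 *   static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
 *                                 target_phys_addr_t *paddr,
 *                                 target_phys_addr_t *len,
 *                                 DMADirection dir)
 *   {
 *       // look addr up in the IOMMU tables; return non-zero on a
 *       // fault, otherwise set *paddr/*len for the mapped chunk
 *   }
 *
 *   DMAContext iommu_dma;
 *   dma_context_init(&iommu_dma, my_iommu_translate, NULL, NULL);
 *
 * dma_context_init() is declared near the end of this header.  Devices
 * that DMA through this bus are then handed &iommu_dma; passing a NULL
 * DMAContext to the helpers below selects the direct, no-IOMMU path.
 */
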
static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return !!dma;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        cpu_physical_memory_rw(addr, buf, len,
                               dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}

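/*
 * Typical device usage, as a sketch (MyDesc, desc_addr and the error
 * handling are hypothetical):
 *
 *   struct MyDesc desc;
 *   if (dma_memory_read(dma, desc_addr, &desc, sizeof(desc))) {
 *       // the access faulted in the IOMMU; flag a device error
 *   }
 *   // ... process the descriptor, then post the updated copy back ...
 *   dma_memory_write(dma, desc_addr, &desc, sizeof(desc));
 *
 * The helpers return 0 on success and non-zero on an IOMMU fault; with
 * a NULL context (no IOMMU) the fast path cannot fail.
 */
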
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        target_phys_addr_t xlen = *len;
        void *p;

        p = cpu_physical_memory_map(addr, &xlen,
                                    dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
                                  dir == DMA_DIRECTION_FROM_DEVICE,
                                  access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}

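/*
 * Zero-copy access, as a sketch.  *len may come back smaller than
 * requested when the range is not contiguous in host memory; callers
 * must either map the remainder separately or fall back to
 * dma_memory_read()/dma_memory_write():
 *
 *   dma_addr_t plen = size;
 *   void *mem = dma_memory_map(dma, addr, &plen,
 *                              DMA_DIRECTION_FROM_DEVICE);
 *   if (!mem) {
 *       // mapping failed; use a bounce buffer instead
 *   }
 *   // ... fill mem[0..plen) with data from the device ...
 *   dma_memory_unmap(dma, mem, plen, DMA_DIRECTION_FROM_DEVICE, plen);
 *
 * The final argument tells unmap how many bytes were actually
 * accessed, so only that much needs to be written back.
 */
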
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end)                    \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA

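/*
 * The macro expands to fixed-width accessors in both endiannesses:
 * lduw_le_dma()/stw_le_dma(), ldl_le_dma()/stl_le_dma(),
 * ldq_le_dma()/stq_le_dma(), plus the corresponding _be_ variants.
 * Each load converts from guest to host byte order and each store
 * converts back.  For instance (REG_STATUS and STATUS_DONE are
 * hypothetical device constants):
 *
 *   uint32_t status = ldl_le_dma(dma, regs + REG_STATUS);
 *   stl_le_dma(dma, regs + REG_STATUS, status | STATUS_DONE);
 */
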
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
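
/*
 * Building a scatter/gather list, as a sketch (the bufN variables are
 * hypothetical): size it with a hint, append (bus address, length)
 * pairs, and destroy it once the transfer has completed:
 *
 *   QEMUSGList qsg;
 *   qemu_sglist_init(&qsg, 2, dma);
 *   qemu_sglist_add(&qsg, buf0_addr, buf0_len);
 *   qemu_sglist_add(&qsg, buf1_addr, buf1_len);
 *   // ... submit, e.g. via dma_bdrv_read() below ...
 *   qemu_sglist_destroy(&qsg);
 *
 * alloc_hint only sizes the initial allocation; the list can grow
 * beyond it as entries are added.
 */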
#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
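
/*
 * For example (a sketch; my_dma_complete and s are hypothetical), a
 * storage controller starts an asynchronous read of the sectors
 * beginning at sector into the guest buffers described by qsg:
 *
 *   BlockDriverAIOCB *aiocb;
 *   aiocb = dma_bdrv_read(bs, &qsg, sector, my_dma_complete, s);
 *
 * my_dma_complete() is invoked once the whole scatter/gather transfer
 * finishes (or fails); the transfer length comes from the list itself.
 */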
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif