/*
 * DMA helper functions
 *
 * Copyright (c) 2009, 2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "block/block.h"
#include "block/accounting.h"

typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DeviceState *dev;
    AddressSpace *as;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size. Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    smp_mb();
}

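/*
 * Usage sketch (illustrative only): dma_memory_rw() below issues this
 * barrier for you, once per access. A device that batches several
 * _relaxed accesses can instead pay for a single explicit barrier
 * up front ("as", "addr0"/"addr1" and "a"/"b" are hypothetical device
 * state, not defined in this header):
 *
 *     dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);
 *     dma_memory_write_relaxed(as, addr0, &a, sizeof(a));
 *     dma_memory_write_relaxed(as, addr1, &b, sizeof(b));
 */
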
/*
 * Checks that the given range of addresses is valid for DMA. This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors.
 */
static inline bool dma_memory_valid(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir, MemTxAttrs attrs)
{
    return address_space_access_valid(as, addr, len,
                                      dir == DMA_DIRECTION_FROM_DEVICE,
                                      attrs);
}

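/*
 * Usage sketch (illustrative only; "as", "ring_base" and "ring_len" are
 * hypothetical device state): one of the "certain cases" is probing a
 * whole region up front before committing to a series of transfers:
 *
 *     if (!dma_memory_valid(as, ring_base, ring_len,
 *                           DMA_DIRECTION_FROM_DEVICE,
 *                           MEMTXATTRS_UNSPECIFIED)) {
 *         ... report a DMA error to the guest ...
 *     }
 */
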
static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
                                                dma_addr_t addr,
                                                void *buf, dma_addr_t len,
                                                DMADirection dir,
                                                MemTxAttrs attrs)
{
    return address_space_rw(as, addr, attrs,
                            buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
                                                  dma_addr_t addr,
                                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
                                                   dma_addr_t addr,
                                                   const void *buf,
                                                   dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

/**
 * dma_memory_rw: Read from or write to an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @dir: indicates the transfer direction
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir, MemTxAttrs attrs)
{
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
}

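/*
 * Usage sketch (illustrative only): a controller whose descriptor
 * encodes the transfer direction can dispatch through dma_memory_rw()
 * directly ("desc", "as" and "buf" are hypothetical device state):
 *
 *     DMADirection dir = desc->to_device ? DMA_DIRECTION_TO_DEVICE
 *                                        : DMA_DIRECTION_FROM_DEVICE;
 *     MemTxResult res = dma_memory_rw(as, desc->addr, buf, desc->len,
 *                                     dir, MEMTXATTRS_UNSPECIFIED);
 */
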
/**
 * dma_memory_read: Read from an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len,
                                          MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, buf, len,
                         DMA_DIRECTION_TO_DEVICE, attrs);
}

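/*
 * Usage sketch (illustrative only; "MyDesc", "as" and "desc_addr" are
 * hypothetical): fetch a descriptor from guest memory and fail the
 * request on a DMA fault:
 *
 *     struct MyDesc desc;
 *     if (dma_memory_read(as, desc_addr, &desc, sizeof(desc),
 *                         MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         ... signal an error to the guest ...
 *     }
 */
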
/**
 * dma_memory_write: Write to address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len,
                                           MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE, attrs);
}

/**
 * dma_memory_set: Fill memory with a constant byte from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs);

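/*
 * Usage sketch (illustrative only; "as", "buf_addr" and "buf_len" are
 * hypothetical device state): zero a guest buffer, e.g. when a device
 * implements a "clear region" command:
 *
 *     dma_memory_set(as, buf_addr, 0, buf_len, MEMTXATTRS_UNSPECIFIED);
 */
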
/**
 * dma_memory_map: Map a physical memory region into a host virtual address.
 *
 * May map a subset of the requested range, given by and returned in @len.
 * May return %NULL and set *@len to zero if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: pointer to length of buffer; updated on return
 * @dir: indicates the transfer direction
 * @attrs: memory attributes
 */
static inline void *dma_memory_map(AddressSpace *as,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir, MemTxAttrs attrs)
{
    hwaddr xlen = *len;
    void *p;

    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
                          attrs);
    *len = xlen;
    return p;
}

/**
 * dma_memory_unmap: Unmaps a memory region previously mapped
 * by dma_memory_map()
 *
 * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
 * @access_len gives the amount of memory that was actually read or written
 * by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by dma_memory_map()
 * @len: buffer length as returned by dma_memory_map()
 * @dir: indicates the transfer direction
 * @access_len: amount of data actually transferred
 */
static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t *pval, \
                                                        MemTxAttrs attrs) \
    { \
        MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
        _end##_bits##_to_cpus(pval); \
        return res; \
    } \
    static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t val, \
                                                        MemTxAttrs attrs) \
    { \
        val = cpu_to_##_end##_bits(val); \
        return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
    }

static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
                                   uint8_t *val, MemTxAttrs attrs)
{
    return dma_memory_read(as, addr, val, 1, attrs);
}

static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
                                  uint8_t val, MemTxAttrs attrs)
{
    return dma_memory_write(as, addr, &val, 1, attrs);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA

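/*
 * Usage sketch (illustrative only): the macro above expands to pairs
 * such as ldl_le_dma()/stl_le_dma(), which combine the access with the
 * endianness conversion ("as" and "reg_addr" are hypothetical):
 *
 *     uint32_t v;
 *     if (ldl_le_dma(as, reg_addr, &v, MEMTXATTRS_UNSPECIFIED) == MEMTX_OK) {
 *         ... v now holds the value in host byte order ...
 *     }
 */
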
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
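
/*
 * Usage sketch (illustrative only): build a two-element scatter-gather
 * list for a hypothetical device "d" with two guest-physical segments:
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(d), 2, d->as);
 *     qemu_sglist_add(&qsg, seg0_addr, seg0_len);
 *     qemu_sglist_add(&qsg, seg1_addr, seg1_len);
 *     ... hand qsg to dma_blk_read()/dma_blk_write() below ...
 *     qemu_sglist_destroy(&qsg);
 */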
#endif

typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);

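/*
 * Usage sketch (illustrative only; "rsp" and "qsg" are hypothetical
 * device state): copy a small bounce buffer out to the guest's
 * scatter-gather list; the returned residual is the part of the SG
 * list that was left unfilled:
 *
 *     uint64_t resid = dma_buf_read(&rsp, sizeof(rsp), &qsg,
 *                                   MEMTXATTRS_UNSPECIFIED);
 *     if (resid) {
 *         ... qsg was larger than the response ...
 *     }
 */
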
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less than or equal to @end - @start + 1, aligned
 * with @start, and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);

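/*
 * Worked example (illustrative only): for start=0x1000 and end=0x2fff
 * the range size is 0x2000, but start is only 0x1000-aligned, so the
 * largest power-of-2 chunk that is both aligned with start and no
 * larger than the range is 0x1000 bytes, giving a mask of 0xfff.
 */
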
#endif