/*
 * Virtio vhost-user GPU Device
 *
 * DRM helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "vugbm.h"

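/*
 * Plain-memory fallback backend: buffers are g_malloc'd arrays of
 * 4-byte pixels, with no dma-buf export support.
 */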
static bool
mem_alloc_bo(struct vugbm_buffer *buf)
{
    buf->mmap = g_malloc(buf->width * buf->height * 4);
    buf->stride = buf->width * 4;
    return true;
}

static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}

static bool
mem_map_bo(struct vugbm_buffer *buf)
{
    return buf->mmap != NULL;
}

static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}

static void
mem_device_destroy(struct vugbm_device *dev)
{
}

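/*
 * udmabuf backend: allocate a sealed memfd and turn it into a dma-buf
 * through the Linux /dev/udmabuf character device.
 */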
#ifdef CONFIG_MEMFD
/* Local copy of the UDMABUF_CREATE ioctl interface from <linux/udmabuf.h> */
struct udmabuf_create {
    uint32_t memfd;
    uint32_t flags;
    uint64_t offset;
    uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)

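/*
 * udmabuf requires page-aligned offsets and sizes, so round the pixel
 * data size up to the host page size.
 */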
static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
}

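/*
 * Back the buffer with a memfd sealed against shrinking; the kernel
 * udmabuf driver requires F_SEAL_SHRINK on the memfd it imports.
 */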
static bool
udmabuf_alloc_bo(struct vugbm_buffer *buf)
{
    int ret;

    buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
    if (buf->memfd < 0) {
        return false;
    }

    ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    buf->stride = buf->width * 4;

    return true;
}

static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}

static bool
udmabuf_map_bo(struct vugbm_buffer *buf)
{
    buf->mmap = mmap(NULL, udmabuf_get_size(buf),
                     PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
    if (buf->mmap == MAP_FAILED) {
        return false;
    }

    return true;
}

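/* Export the memfd as a dma-buf; the ioctl returns the new fd on success. */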
static bool
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
{
    struct udmabuf_create create = {
        .memfd = buf->memfd,
        .offset = 0,
        .size = udmabuf_get_size(buf),
    };

    *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);

    return *fd >= 0;
}

static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}

static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif

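/*
 * GBM backend: allocate buffer objects through libgbm on top of a DRM
 * device fd, with native dma-buf export.
 */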
#ifdef CONFIG_GBM
static bool
alloc_bo(struct vugbm_buffer *buf)
{
    struct gbm_device *dev = buf->dev->dev;

    assert(!buf->bo);

    buf->bo = gbm_bo_create(dev, buf->width, buf->height,
                            buf->format,
                            GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);

    if (buf->bo) {
        buf->stride = gbm_bo_get_stride(buf->bo);
        return true;
    }

    return false;
}

static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}

static bool
map_bo(struct vugbm_buffer *buf)
{
    uint32_t stride;

    buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
                           GBM_BO_TRANSFER_READ_WRITE, &stride,
                           &buf->mmap_data);

    assert(stride == buf->stride);

    return buf->mmap != NULL;
}

static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}

static bool
get_fd(struct vugbm_buffer *buf, int *fd)
{
    *fd = gbm_bo_get_fd(buf->bo);

    return *fd >= 0;
}

static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif

void
vugbm_device_destroy(struct vugbm_device *dev)
{
    if (!dev->inited) {
        return;
    }

    dev->device_destroy(dev);
}

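/*
 * Pick the best available backend: GBM on the given DRM fd if
 * possible, otherwise udmabuf, otherwise the plain-memory fallback.
 */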
void
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    assert(!dev->inited);

#ifdef CONFIG_GBM
    if (fd >= 0) {
        dev->dev = gbm_create_device(fd);
    }
    if (dev->dev != NULL) {
        dev->fd = fd;
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
        dev->inited = true;
    }
#endif
#ifdef CONFIG_MEMFD
    if (!dev->inited && g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd >= 0) {
            g_debug("Using experimental udmabuf backend");
            dev->alloc_bo = udmabuf_alloc_bo;
            dev->free_bo = udmabuf_free_bo;
            dev->get_fd = udmabuf_get_fd;
            dev->map_bo = udmabuf_map_bo;
            dev->unmap_bo = udmabuf_unmap_bo;
            dev->device_destroy = udmabuf_device_destroy;
            dev->inited = true;
        }
    }
#endif
    if (!dev->inited) {
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        dev->inited = true;
    }
    assert(dev->inited);
}

static bool
vugbm_buffer_map(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    return dev->map_bo(buf);
}

static void
vugbm_buffer_unmap(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    dev->unmap_bo(buf);
}

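/* Only backends that set get_fd (GBM, udmabuf) can export a dma-buf. */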
bool
vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
{
    if (!buffer->dev->get_fd) {
        return false;
    }

    return true;
}

bool
vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
{
    if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
        !buffer->dev->get_fd(buffer, fd)) {
        g_warning("Failed to get dmabuf");
        return false;
    }

    if (*fd < 0) {
        g_warning("error: dmabuf_fd < 0");
        return false;
    }

    return true;
}

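/* Allocate an XRGB8888 buffer with the active backend and map it. */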
bool
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
                    uint32_t width, uint32_t height)
{
    buffer->dev = dev;
    buffer->width = width;
    buffer->height = height;
    buffer->format = GBM_FORMAT_XRGB8888;
    buffer->stride = 0; /* modified during alloc */
    if (!dev->alloc_bo(buffer)) {
        g_warning("alloc_bo failed");
        return false;
    }

    if (!vugbm_buffer_map(buffer)) {
        g_warning("map_bo failed");
        goto err;
    }

    return true;

err:
    dev->free_bo(buffer);
    return false;
}

void
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
{
    struct vugbm_device *dev = buffer->dev;

    vugbm_buffer_unmap(buffer);
    dev->free_bo(buffer);
}