/* virtio-pci.c - pci interface for virtio interface
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 *  Copyright IBM Corp. 2007
 *  Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 *  Adopted for Seabios: Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPLv3
 * See the COPYING file in the top-level directory.
 */
17
Kevin O'Connor7d09d0e2010-05-10 21:51:38 -040018#include "config.h" // CONFIG_DEBUG_LEVEL
Kevin O'Connor9dea5902013-09-14 20:23:54 -040019#include "malloc.h" // free
Kevin O'Connor2d2fa312013-09-14 21:55:26 -040020#include "output.h" // dprintf
Sebastian Herbszt70451b62011-11-21 12:23:20 +010021#include "pci.h" // pci_config_readl
Aleksandr Bezzubikov7de1f652017-08-18 02:33:19 +030022#include "pcidevice.h" // struct pci_device
Sebastian Herbszt70451b62011-11-21 12:23:20 +010023#include "pci_regs.h" // PCI_BASE_ADDRESS_0
Kevin O'Connorfa9c66a2013-09-14 19:10:40 -040024#include "string.h" // memset
Kevin O'Connor2d2fa312013-09-14 21:55:26 -040025#include "virtio-pci.h"
Gerd Hoffmann040b92c2020-03-06 19:44:47 +010026#include "virtio-mmio.h"
Kevin O'Connor2d2fa312013-09-14 21:55:26 -040027#include "virtio-ring.h"
Gleb Natapov89acfa32010-05-10 11:36:37 +030028
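/*
 * Low-level register accessors backing the vp_read()/vp_write() helpers
 * used throughout this file.
 *
 * A modern (virtio 1.0) device exposes its registers through regions
 * described by PCI capabilities, and a region can be reached in one of
 * three ways (chosen in vp_init_simple() below):
 *
 *   VP_ACCESS_IO     - the region lives in an I/O port BAR
 *   VP_ACCESS_MMIO   - the region lives in a memory BAR mapped below 4G
 *   VP_ACCESS_PCICFG - the region is reached indirectly through the
 *                      VIRTIO_PCI_CAP_PCI_CFG window in PCI config space
 *                      (used for memory BARs mapped above 4G)
 *
 * 64-bit accesses are split into two 32-bit accesses, low dword first.
 */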
u64 _vp_read(struct vp_cap *cap, u32 offset, u8 size)
{
    u64 var = 0;

    switch (cap->mode) {
    case VP_ACCESS_IO:
    {
        u32 addr = cap->ioaddr + offset;
        switch (size) {
        case 8:
            var = inl(addr);
            var |= (u64)inl(addr+4) << 32;
            break;
        case 4:
            var = inl(addr);
            break;
        case 2:
            var = inw(addr);
            break;
        case 1:
            var = inb(addr);
            break;
        }
        break;
    }

    case VP_ACCESS_MMIO:
    {
        void *addr = cap->memaddr + offset;
        switch (size) {
        case 8:
            var = readl(addr);
            var |= (u64)readl(addr+4) << 32;
            break;
        case 4:
            var = readl(addr);
            break;
        case 2:
            var = readw(addr);
            break;
        case 1:
            var = readb(addr);
            break;
        }
        break;
    }

    case VP_ACCESS_PCICFG:
    {
        u32 addr = cap->baroff + offset;
        pci_config_writeb(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.bar),
                          cap->bar);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.offset),
                          addr);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.length),
                          (size > 4) ? 4 : size);
        switch (size) {
        case 8:
            var = pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            pci_config_writel(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.offset),
                              addr + 4);
            var |= (u64)pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data)) << 32;
            break;
        case 4:
            var = pci_config_readl(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        case 2:
            var = pci_config_readw(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        case 1:
            var = pci_config_readb(cap->bdf, cap->cfg +
                      offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
            break;
        }
    }
    }
    dprintf(9, "vp read %x (%d) -> 0x%llx\n", cap->ioaddr + offset, size, var);
    return var;
}

void _vp_write(struct vp_cap *cap, u32 offset, u8 size, u64 var)
{
    dprintf(9, "vp write %x (%d) <- 0x%llx\n", cap->ioaddr + offset, size, var);

    switch (cap->mode) {
    case VP_ACCESS_IO:
    {
        u32 addr = cap->ioaddr + offset;
        switch (size) {
        case 4:
            outl(var, addr);
            break;
        case 2:
            outw(var, addr);
            break;
        case 1:
            outb(var, addr);
            break;
        }
        break;
    }

    case VP_ACCESS_MMIO:
    {
        void *addr = cap->memaddr + offset;
        switch (size) {
        case 4:
            writel(addr, var);
            break;
        case 2:
            writew(addr, var);
            break;
        case 1:
            writeb(addr, var);
            break;
        }
        break;
    }

    case VP_ACCESS_PCICFG:
    {
        u32 addr = cap->baroff + offset;
        pci_config_writeb(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.bar),
                          cap->bar);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.offset),
                          addr);
        pci_config_writel(cap->bdf, cap->cfg +
                          offsetof(struct virtio_pci_cfg_cap, cap.length),
                          size);
        switch (size) {
        case 4:
            pci_config_writel(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 2:
            pci_config_writew(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        case 1:
            pci_config_writeb(cap->bdf, cap->cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              var);
            break;
        }
    }
    }
}

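/*
 * Feature negotiation.  Modern PCI devices expose a 64-bit feature space
 * accessed 32 bits at a time through a select/value register pair; the
 * legacy interface (and the mmio path here) only uses the low 32 bits.
 */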
u64 vp_get_features(struct vp_device *vp)
{
    u32 f0, f1;

    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, device_feature_select, 0);
        f0 = vp_read(&vp->common, virtio_mmio_cfg, device_feature);
        f1 = 0;
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 0);
        f0 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
        vp_write(&vp->common, virtio_pci_common_cfg, device_feature_select, 1);
        f1 = vp_read(&vp->common, virtio_pci_common_cfg, device_feature);
    } else {
        f0 = vp_read(&vp->legacy, virtio_pci_legacy, host_features);
        f1 = 0;
    }
    return ((u64)f1 << 32) | f0;
}

void vp_set_features(struct vp_device *vp, u64 features)
{
    u32 f0, f1;

    f0 = features;
    f1 = features >> 32;

    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, guest_feature_select, 0);
        vp_write(&vp->common, virtio_mmio_cfg, guest_feature, f0);
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f0);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature_select, 1);
        vp_write(&vp->common, virtio_pci_common_cfg, guest_feature, f1);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, guest_features, f0);
    }
}

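/*
 * Device status and interrupt status helpers.  Reading the ISR register
 * also acknowledges (clears) a pending interrupt, which is why vp_reset()
 * reads it after clearing the status register.
 */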
u8 vp_get_status(struct vp_device *vp)
{
    if (vp->use_mmio) {
        return vp_read(&vp->common, virtio_mmio_cfg, device_status);
    } else if (vp->use_modern) {
        return vp_read(&vp->common, virtio_pci_common_cfg, device_status);
    } else {
        return vp_read(&vp->legacy, virtio_pci_legacy, status);
    }
}

void vp_set_status(struct vp_device *vp, u8 status)
{
    if (status == 0) /* reset */
        return;
    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, device_status, status);
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_status, status);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, status, status);
    }
}

u8 vp_get_isr(struct vp_device *vp)
{
    if (vp->use_mmio) {
        return vp_read(&vp->common, virtio_mmio_cfg, irq_status);
    } else if (vp->use_modern) {
        return vp_read(&vp->isr, virtio_pci_isr, isr);
    } else {
        return vp_read(&vp->legacy, virtio_pci_legacy, isr);
    }
}

void vp_reset(struct vp_device *vp)
{
    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, device_status, 0);
        vp_read(&vp->common, virtio_mmio_cfg, irq_status);
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, device_status, 0);
        vp_read(&vp->isr, virtio_pci_isr, isr);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, status, 0);
        vp_read(&vp->legacy, virtio_pci_legacy, isr);
    }
}

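/*
 * Notify the device that new buffers were added to a virtqueue.  For
 * modern PCI devices the doorbell lives in the notify capability region
 * at queue_notify_off * notify_off_multiplier (queue_notify_off is read
 * in vp_find_vq(), the multiplier in vp_init_simple()); the queue index
 * is written there as a 16-bit value.  Legacy and mmio devices use a
 * fixed queue_notify register instead.
 */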
void vp_notify(struct vp_device *vp, struct vring_virtqueue *vq)
{
    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, queue_notify, vq->queue_index);
    } else if (vp->use_modern) {
        u32 offset = vq->queue_notify_off * vp->notify_off_multiplier;
        switch (vp->notify.mode) {
        case VP_ACCESS_IO:
            outw(vq->queue_index, vp->notify.ioaddr + offset);
            break;
        case VP_ACCESS_MMIO:
            writew(vp->notify.memaddr + offset, vq->queue_index);
            break;
        case VP_ACCESS_PCICFG:
            pci_config_writeb(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.bar),
                              vp->notify.bar);
            pci_config_writel(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.offset),
                              vp->notify.baroff + offset);
            pci_config_writel(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, cap.length),
                              2);
            pci_config_writew(vp->notify.bdf, vp->notify.cfg +
                              offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                              vq->queue_index);
        }
        dprintf(9, "vp notify %x (%d) -- 0x%x\n",
                vp->notify.ioaddr, 2, vq->queue_index);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_notify, vq->queue_index);
    }
}

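/*
 * Allocate and initialize virtqueue 'queue_index':
 *  - select the queue and query its size from the device
 *  - allocate the vring (descriptor table, avail ring, used ring)
 *  - program the ring addresses and enable the queue
 * Returns the ring size on success, -1 on failure (with *p_vq set to NULL).
 */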
int vp_find_vq(struct vp_device *vp, int queue_index,
               struct vring_virtqueue **p_vq)
{
    u16 num;

    ASSERT32FLAT();
    struct vring_virtqueue *vq = *p_vq = memalign_high(PAGE_SIZE, sizeof(*vq));
    if (!vq) {
        warn_noalloc();
        goto fail;
    }
    memset(vq, 0, sizeof(*vq));

    /* select the queue */
    if (vp->use_mmio) {
        vp_write(&vp->common, virtio_mmio_cfg, queue_select, queue_index);
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, queue_select, queue_index);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_sel, queue_index);
    }

    /* check if the queue is available */
    if (vp->use_mmio) {
        num = vp_read(&vp->common, virtio_mmio_cfg, queue_num_max);
        if (num > MAX_QUEUE_NUM)
            num = MAX_QUEUE_NUM;
        vp_write(&vp->common, virtio_mmio_cfg, queue_num, num);
    } else if (vp->use_modern) {
        num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
        if (num > MAX_QUEUE_NUM) {
            vp_write(&vp->common, virtio_pci_common_cfg, queue_size,
                     MAX_QUEUE_NUM);
            num = vp_read(&vp->common, virtio_pci_common_cfg, queue_size);
        }
    } else {
        num = vp_read(&vp->legacy, virtio_pci_legacy, queue_num);
    }
    if (!num) {
        dprintf(1, "ERROR: queue size is 0\n");
        goto fail;
    }
    if (num > MAX_QUEUE_NUM) {
        dprintf(1, "ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        goto fail;
    }

    /* check if the queue is already active */
    if (vp->use_mmio) {
        /* TODO */;
    } else if (vp->use_modern) {
        if (vp_read(&vp->common, virtio_pci_common_cfg, queue_enable)) {
            dprintf(1, "ERROR: queue already active\n");
            goto fail;
        }
    } else {
        if (vp_read(&vp->legacy, virtio_pci_legacy, queue_pfn)) {
            dprintf(1, "ERROR: queue already active\n");
            goto fail;
        }
    }
    vq->queue_index = queue_index;

    /* initialize the queue */
    struct vring *vr = &vq->vring;
    vring_init(vr, num, (unsigned char*)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */

    if (vp->use_mmio) {
        if (vp_read(&vp->common, virtio_mmio_cfg, version) == 2) {
            vp_write(&vp->common, virtio_mmio_cfg, queue_desc_lo,
                     (unsigned long)virt_to_phys(vr->desc));
            vp_write(&vp->common, virtio_mmio_cfg, queue_desc_hi, 0);
            vp_write(&vp->common, virtio_mmio_cfg, queue_driver_lo,
                     (unsigned long)virt_to_phys(vr->avail));
            vp_write(&vp->common, virtio_mmio_cfg, queue_driver_hi, 0);
            vp_write(&vp->common, virtio_mmio_cfg, queue_device_lo,
                     (unsigned long)virt_to_phys(vr->used));
            vp_write(&vp->common, virtio_mmio_cfg, queue_device_hi, 0);
            vp_write(&vp->common, virtio_mmio_cfg, queue_ready, 1);
        } else {
            vp_write(&vp->common, virtio_mmio_cfg, legacy_guest_page_size,
                     (unsigned long)1 << PAGE_SHIFT);
            vp_write(&vp->common, virtio_mmio_cfg, legacy_queue_pfn,
                     (unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT);
        }
    } else if (vp->use_modern) {
        vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_lo,
                 (unsigned long)virt_to_phys(vr->desc));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_desc_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_lo,
                 (unsigned long)virt_to_phys(vr->avail));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_avail_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_used_lo,
                 (unsigned long)virt_to_phys(vr->used));
        vp_write(&vp->common, virtio_pci_common_cfg, queue_used_hi, 0);
        vp_write(&vp->common, virtio_pci_common_cfg, queue_enable, 1);
        vq->queue_notify_off = vp_read(&vp->common, virtio_pci_common_cfg,
                                       queue_notify_off);
    } else {
        vp_write(&vp->legacy, virtio_pci_legacy, queue_pfn,
                 (unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT);
    }
    return num;

fail:
    free(vq);
    *p_vq = NULL;
    return -1;
}

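/*
 * Probe a virtio PCI device: walk its vendor-specific capability list to
 * find the common, notify, ISR and device config regions and record how
 * each must be accessed (I/O port, MMIO, or indirectly via the PCI config
 * window for BARs mapped above 4G).  If all four regions are present the
 * device is driven in modern (1.0) mode, otherwise it falls back to the
 * legacy (0.9.5) interface on BAR 0.  The device is then reset and the
 * ACKNOWLEDGE and DRIVER status bits are set.
 *
 * Rough calling sequence for the drivers in this tree (a sketch only --
 * see virtio-blk.c / virtio-scsi.c for the real sequence including
 * feature negotiation and error handling):
 *
 *   vp_init_simple(vp, pci);
 *   if (vp_find_vq(vp, 0, &vq) < 0)
 *       goto fail;
 *   // negotiate features / read device config here ...
 *   vp_set_status(vp, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER
 *                 | VIRTIO_CONFIG_S_DRIVER_OK);
 */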
void vp_init_simple(struct vp_device *vp, struct pci_device *pci)
{
    u8 cap = pci_find_capability(pci->bdf, PCI_CAP_ID_VNDR, 0);
    struct vp_cap *vp_cap;
    const char *mode;
    u32 offset, base, mul;
    u64 addr;
    u8 type;

    memset(vp, 0, sizeof(*vp));
    while (cap != 0) {
        type = pci_config_readb(pci->bdf, cap +
                                offsetof(struct virtio_pci_cap, cfg_type));
        switch (type) {
        case VIRTIO_PCI_CAP_COMMON_CFG:
            vp_cap = &vp->common;
            break;
        case VIRTIO_PCI_CAP_NOTIFY_CFG:
            vp_cap = &vp->notify;
            mul = offsetof(struct virtio_pci_notify_cap, notify_off_multiplier);
            vp->notify_off_multiplier = pci_config_readl(pci->bdf, cap + mul);
            break;
        case VIRTIO_PCI_CAP_ISR_CFG:
            vp_cap = &vp->isr;
            break;
        case VIRTIO_PCI_CAP_DEVICE_CFG:
            vp_cap = &vp->device;
            break;
        case VIRTIO_PCI_CAP_PCI_CFG:
            vp->common.cfg = cap;
            vp->common.bdf = pci->bdf;
            vp->notify.cfg = cap;
            vp->notify.bdf = pci->bdf;
            vp->isr.cfg = cap;
            vp->isr.bdf = pci->bdf;
            vp->device.cfg = cap;
            vp->device.bdf = pci->bdf;
            vp_cap = NULL;
            dprintf(1, "pci dev %pP virtio cap at 0x%x type %d"
                    " [pci cfg access]\n", pci, cap, type);
            break;
        default:
            vp_cap = NULL;
            break;
        }
        if (vp_cap && !vp_cap->cap) {
            vp_cap->cap = cap;
            vp_cap->bar = pci_config_readb(pci->bdf, cap +
                                           offsetof(struct virtio_pci_cap, bar));
            offset = pci_config_readl(pci->bdf, cap +
                                      offsetof(struct virtio_pci_cap, offset));
            base = PCI_BASE_ADDRESS_0 + 4 * vp_cap->bar;
            addr = pci_config_readl(pci->bdf, base);
            if (addr & PCI_BASE_ADDRESS_SPACE_IO) {
                addr &= PCI_BASE_ADDRESS_IO_MASK;
                vp_cap->mode = VP_ACCESS_IO;
            } else if ((addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
                       PCI_BASE_ADDRESS_MEM_TYPE_64) {
                addr &= PCI_BASE_ADDRESS_MEM_MASK;
                addr |= (u64)pci_config_readl(pci->bdf, base + 4) << 32;
                vp_cap->mode = (addr > 0xffffffffll) ?
                    VP_ACCESS_PCICFG : VP_ACCESS_MMIO;
            } else {
                addr &= PCI_BASE_ADDRESS_MEM_MASK;
                vp_cap->mode = VP_ACCESS_MMIO;
            }
            switch (vp_cap->mode) {
            case VP_ACCESS_IO:
            {
                u32 addr = pci_enable_iobar(pci, base);
                if (!addr)
                    return;
                vp_cap->ioaddr = addr + offset;
                mode = "io";
                break;
            }
            case VP_ACCESS_MMIO:
            {
                void *addr = pci_enable_membar(pci, base);
                if (!addr)
                    return;
                vp_cap->memaddr = addr + offset;
                mode = "mmio";
                break;
            }
            case VP_ACCESS_PCICFG:
                mode = "pcicfg";
                vp_cap->baroff = offset;
                break;
            default:
                mode = "Huh?";
                break;
            }
            dprintf(1, "pci dev %pP virtio cap at 0x%x type %d "
                    "bar %d at 0x%08llx off +0x%04x [%s]\n",
                    pci, vp_cap->cap, type, vp_cap->bar, addr, offset, mode);
        }

        cap = pci_find_capability(pci->bdf, PCI_CAP_ID_VNDR, cap);
    }

    if (vp->common.cap && vp->notify.cap && vp->isr.cap && vp->device.cap) {
        dprintf(1, "pci dev %pP using modern (1.0) virtio mode\n", pci);
        vp->use_modern = 1;
    } else {
        dprintf(1, "pci dev %pP using legacy (0.9.5) virtio mode\n", pci);
        vp->legacy.bar = 0;
        vp->legacy.ioaddr = pci_enable_iobar(pci, PCI_BASE_ADDRESS_0);
        if (!vp->legacy.ioaddr)
            return;
        vp->legacy.mode = VP_ACCESS_IO;
    }

    vp_reset(vp);
    pci_enable_busmaster(pci);
    vp_set_status(vp, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                  VIRTIO_CONFIG_S_DRIVER);
}