/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to an HVM guest
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/xen/xen_backend.h"
#include "xen_pt.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))
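
/*
 * Example: XEN_PT_MERGE_VALUE(0xAB, 0xCD, 0xF0) == 0xAD -- the bits selected
 * by val_mask are taken from 'value', all remaining bits from 'data'.
 */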

#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */

/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* The PCI Express Capability Structure of the VF of an Intel 82599
         * 10GbE Controller is trivial: see 'Table 9.7. VF PCIe Configuration
         * Space' in the datasheet at
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         * Its PCI Express Capabilities Register is 0, so the Capability
         * Version is 0 and xen_pt_pcie_size_init() would fail.  We should
         * not try to expose it to the guest.
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulate register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulate register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}
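
/*
 * Bits that are neither emulated nor read-only are "throughable", i.e. a
 * guest write to them is forwarded to the physical device; unless the device
 * is configured as permissive, reserved bits are held back as well.  E.g. for
 * the Status register below (emu_mask 0x0010, ro_mask 0x06F8, res_mask 0x0007)
 * only the RW1C bits 0xF900 remain throughable.
 */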

/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}

/* Read register functions */

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
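
/*
 * Note on the write helpers above: the guest-visible (emulated) copy is
 * updated through writable_mask, while only the throughable bits of the
 * guest value reach the hardware; the current device value fills in the
 * remaining bits, with its RW1C bits masked out so that merely writing the
 * device value back cannot clear write-1-to-clear status bits.
 */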


/* XenPTRegInfo declaration
 * - only for emulated registers (either partly or fully emulated).
 * - for pass-through registers that need special behavior (such as
 *   interacting with other components), set emu_mask to all 0 and supply
 *   proper r/w functions.
 * - do NOT use all-Fs for init_val, otherwise the table entry will not be
 *   registered.
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* use the emulated initial value, with the Multi-Function bit (0x80) set */
    *data = reg->init_val | 0x80;
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    if (*val & PCI_COMMAND_INTX_DISABLE) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    } else {
        if (s->machine_irq) {
            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
        }
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
#define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
#define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */

static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}
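
/*
 * For a 64-bit BAR the size is tracked across two consecutive PCIIORegion
 * entries; the helper above stitches the upper half ((r + 1)->size) and the
 * lower half (r->size) back into a single 64-bit value.
 */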

static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulate mask depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}

/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}

/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset = PCI_VENDOR_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_vendor_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset = PCI_DEVICE_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_device_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset = PCI_COMMAND,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0xF880,
        .emu_mask = 0x0743,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset = PCI_CAPABILITY_LIST,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so it needs to be declared after the Cap Ptr reg
     */
    {
        .offset = PCI_STATUS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0x0007,
        .ro_mask = 0x06F8,
        .rw1c_mask = 0xF900,
        .emu_mask = 0x0010,
        .init = xen_pt_status_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset = PCI_CACHE_LINE_SIZE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset = PCI_LATENCY_TIMER,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset = PCI_HEADER_TYPE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0x00,
        .init = xen_pt_header_type_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset = PCI_INTERRUPT_LINE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset = PCI_INTERRUPT_PIN,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_irqpin_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* the BAR masks need to be decided later, depending on the IO/MEM type */
    {
        .offset = PCI_BASE_ADDRESS_0,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset = PCI_BASE_ADDRESS_1,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset = PCI_BASE_ADDRESS_2,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset = PCI_BASE_ADDRESS_3,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset = PCI_BASE_ADDRESS_4,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset = PCI_BASE_ADDRESS_5,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset = PCI_ROM_ADDRESS,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    {
        .offset = PCI_VPD_ADDR,
        .size = 2,
        .ro_mask = 0x0003,
        .emu_mask = 0x0003,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}
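
/*
 * Both helpers read the PCI Express Capabilities register of the physical
 * device: the capability version sits in bits 3:0 (PCI_EXP_FLAGS_VERS) and
 * the device/port type in bits 7:4 (PCI_EXP_FLAGS_TYPE), hence the shift.
 */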

/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of Root Complex Integrated Endpoint
     * with cap_ver 1.x
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}

/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x10000000,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset = PCI_EXP_DEVSTA,
        .size = 2,
        .res_mask = 0xFFC0,
        .ro_mask = 0x0030,
        .rw1c_mask = 0x000F,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset = PCI_EXP_LNKSTA,
        .size = 2,
        .ro_mask = 0x3FFF,
        .rw1c_mask = 0xC000,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset = 0x28,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFE0,
        .emu_mask = 0xFFFF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset = 0x30,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Power Management Capability
 */

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset = PCI_CAP_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xF9C8,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset = PCI_PM_CTRL,
        .size = 2,
        .init_val = 0x0008,
        .res_mask = 0x00F0,
        .ro_mask = 0x610C,
        .rw1c_mask = 0x8000,
        .emu_mask = 0x810B,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
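
/*
 * E.g. xen_pt_msi_check_type(offset, flags, DATA) compares 'offset' against
 * PCI_MSI_DATA_64 when the capability advertises 64-bit addressing, and
 * against PCI_MSI_DATA_32 otherwise.
 */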

/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in the MSI emulation code and
                 * QEMU can go on running.  The guest's MSI simply won't work.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the 32/64-bit type */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
| 1205 | if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { |
| 1206 | *data = XEN_PT_INVALID_REG; |
| 1207 | } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1208 | *data = reg->init_val; |
| 1209 | } else { |
| 1210 | *data = XEN_PT_INVALID_REG; |
| 1211 | } |
| 1212 | return 0; |
| 1213 | } |
| 1214 | |
| 1215 | /* write Message Address register */ |
| 1216 | static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s, |
| 1217 | XenPTReg *cfg_entry, uint32_t *val, |
| 1218 | uint32_t dev_value, uint32_t valid_mask) |
| 1219 | { |
| 1220 | XenPTRegInfo *reg = cfg_entry->reg; |
| 1221 | uint32_t writable_mask = 0; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1222 | uint32_t old_addr = *cfg_entry->ptr.word; |
| 1223 | uint32_t *data = cfg_entry->ptr.word; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1224 | |
| 1225 | /* modify emulate register */ |
| 1226 | writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1227 | *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); |
| 1228 | s->msi->addr_lo = *data; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1229 | |
| 1230 | /* create value for writing to I/O device register */ |
Jan Beulich | 0e7ef22 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1231 | *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1232 | |
| 1233 | /* update MSI */ |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1234 | if (*data != old_addr) { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1235 | if (s->msi->mapped) { |
| 1236 | xen_pt_msi_update(s); |
| 1237 | } |
| 1238 | } |
| 1239 | |
| 1240 | return 0; |
| 1241 | } |
| 1242 | /* write Message Upper Address register */ |
| 1243 | static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s, |
| 1244 | XenPTReg *cfg_entry, uint32_t *val, |
| 1245 | uint32_t dev_value, uint32_t valid_mask) |
| 1246 | { |
| 1247 | XenPTRegInfo *reg = cfg_entry->reg; |
| 1248 | uint32_t writable_mask = 0; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1249 | uint32_t old_addr = *cfg_entry->ptr.word; |
| 1250 | uint32_t *data = cfg_entry->ptr.word; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1251 | |
| 1252 | /* check whether the type is 64 bit or not */ |
| 1253 | if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { |
| 1254 | XEN_PT_ERR(&s->dev, |
| 1255 | "Can't write to the upper address without 64 bit support\n"); |
| 1256 | return -1; |
| 1257 | } |
| 1258 | |
| 1259 | /* modify emulate register */ |
| 1260 | writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1261 | *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1262 | /* update the msi_info too */ |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1263 | s->msi->addr_hi = *data; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1264 | |
| 1265 | /* create value for writing to I/O device register */ |
Jan Beulich | 0e7ef22 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1266 | *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1267 | |
| 1268 | /* update MSI */ |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1269 | if (*data != old_addr) { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1270 | if (s->msi->mapped) { |
| 1271 | xen_pt_msi_update(s); |
| 1272 | } |
| 1273 | } |
| 1274 | |
| 1275 | return 0; |
| 1276 | } |
| 1277 | |
| 1278 | |
| 1279 | /* write Message Data register */ |
| 1280 | /* (registered for both the 32-bit and 64-bit Message Data offsets) */ |
| 1281 | static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s, |
| 1282 | XenPTReg *cfg_entry, uint16_t *val, |
| 1283 | uint16_t dev_value, uint16_t valid_mask) |
| 1284 | { |
| 1285 | XenPTRegInfo *reg = cfg_entry->reg; |
| 1286 | XenPTMSI *msi = s->msi; |
| 1287 | uint16_t writable_mask = 0; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1288 | uint16_t old_data = *cfg_entry->ptr.half_word; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1289 | uint32_t offset = reg->offset; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1290 | uint16_t *data = cfg_entry->ptr.half_word; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1291 | |
| 1292 | /* check that the register offset matches the device's 32/64-bit layout */ |
Jan Beulich | 7611dae | 2015-06-02 15:07:00 +0000 | [diff] [blame] | 1293 | if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1294 | /* exit I/O emulator */ |
| 1295 | XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n"); |
| 1296 | return -1; |
| 1297 | } |
| 1298 | |
| 1299 | /* modify emulated register */ |
| 1300 | writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1301 | *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1302 | /* update the msi_info too */ |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1303 | msi->data = *data; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1304 | |
| 1305 | /* create value for writing to I/O device register */ |
Jan Beulich | 0e7ef22 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1306 | *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1307 | |
| 1308 | /* update MSI */ |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1309 | if (*data != old_data) { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1310 | if (msi->mapped) { |
| 1311 | xen_pt_msi_update(s); |
| 1312 | } |
| 1313 | } |
| 1314 | |
| 1315 | return 0; |
| 1316 | } |
| 1317 | |
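|      | /* write Mask register (per-vector masking); keep a copy of the written |
|      |  * value in s->msi->mask */ |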
Roger Pau Monne | a803633 | 2017-08-24 16:07:03 +0100 | [diff] [blame] | 1318 | static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, |
| 1319 | uint32_t *val, uint32_t dev_value, |
| 1320 | uint32_t valid_mask) |
| 1321 | { |
| 1322 | int rc; |
| 1323 | |
| 1324 | rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask); |
| 1325 | if (rc) { |
| 1326 | return rc; |
| 1327 | } |
| 1328 | |
| 1329 | s->msi->mask = *val; |
| 1330 | |
| 1331 | return 0; |
| 1332 | } |
| 1333 | |
Stefan Weil | 0546b8c | 2012-08-10 22:03:25 +0200 | [diff] [blame] | 1334 | /* MSI Capability Structure reg static information table */ |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1335 | static XenPTRegInfo xen_pt_emu_reg_msi[] = { |
| 1336 | /* Next Pointer reg */ |
| 1337 | { |
| 1338 | .offset = PCI_CAP_LIST_NEXT, |
| 1339 | .size = 1, |
| 1340 | .init_val = 0x00, |
| 1341 | .ro_mask = 0xFF, |
| 1342 | .emu_mask = 0xFF, |
| 1343 | .init = xen_pt_ptr_reg_init, |
| 1344 | .u.b.read = xen_pt_byte_reg_read, |
| 1345 | .u.b.write = xen_pt_byte_reg_write, |
| 1346 | }, |
| 1347 | /* Message Control reg */ |
| 1348 | { |
| 1349 | .offset = PCI_MSI_FLAGS, |
| 1350 | .size = 2, |
| 1351 | .init_val = 0x0000, |
Jan Beulich | 0ad3393 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1352 | .res_mask = 0xFE00, |
| 1353 | .ro_mask = 0x018E, |
Jan Beulich | d1d35cf | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1354 | .emu_mask = 0x017E, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1355 | .init = xen_pt_msgctrl_reg_init, |
| 1356 | .u.w.read = xen_pt_word_reg_read, |
| 1357 | .u.w.write = xen_pt_msgctrl_reg_write, |
| 1358 | }, |
| 1359 | /* Message Address reg */ |
| 1360 | { |
| 1361 | .offset = PCI_MSI_ADDRESS_LO, |
| 1362 | .size = 4, |
| 1363 | .init_val = 0x00000000, |
| 1364 | .ro_mask = 0x00000003, |
| 1365 | .emu_mask = 0xFFFFFFFF, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1366 | .init = xen_pt_common_reg_init, |
| 1367 | .u.dw.read = xen_pt_long_reg_read, |
| 1368 | .u.dw.write = xen_pt_msgaddr32_reg_write, |
| 1369 | }, |
| 1370 | /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */ |
| 1371 | { |
| 1372 | .offset = PCI_MSI_ADDRESS_HI, |
| 1373 | .size = 4, |
| 1374 | .init_val = 0x00000000, |
| 1375 | .ro_mask = 0x00000000, |
| 1376 | .emu_mask = 0xFFFFFFFF, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1377 | .init = xen_pt_msgaddr64_reg_init, |
| 1378 | .u.dw.read = xen_pt_long_reg_read, |
| 1379 | .u.dw.write = xen_pt_msgaddr64_reg_write, |
| 1380 | }, |
| 1381 | /* Message Data reg (16 bits of data for 32-bit devices) */ |
| 1382 | { |
| 1383 | .offset = PCI_MSI_DATA_32, |
| 1384 | .size = 2, |
| 1385 | .init_val = 0x0000, |
| 1386 | .ro_mask = 0x0000, |
| 1387 | .emu_mask = 0xFFFF, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1388 | .init = xen_pt_msgdata_reg_init, |
| 1389 | .u.w.read = xen_pt_word_reg_read, |
| 1390 | .u.w.write = xen_pt_msgdata_reg_write, |
| 1391 | }, |
| 1392 | /* Message Data reg (16 bits of data for 64-bit devices) */ |
| 1393 | { |
| 1394 | .offset = PCI_MSI_DATA_64, |
| 1395 | .size = 2, |
| 1396 | .init_val = 0x0000, |
| 1397 | .ro_mask = 0x0000, |
| 1398 | .emu_mask = 0xFFFF, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1399 | .init = xen_pt_msgdata_reg_init, |
| 1400 | .u.w.read = xen_pt_word_reg_read, |
| 1401 | .u.w.write = xen_pt_msgdata_reg_write, |
| 1402 | }, |
Jan Beulich | 7611dae | 2015-06-02 15:07:00 +0000 | [diff] [blame] | 1403 | /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ |
| 1404 | { |
| 1405 | .offset = PCI_MSI_MASK_32, |
| 1406 | .size = 4, |
| 1407 | .init_val = 0x00000000, |
| 1408 | .ro_mask = 0xFFFFFFFF, |
| 1409 | .emu_mask = 0xFFFFFFFF, |
| 1410 | .init = xen_pt_mask_reg_init, |
| 1411 | .u.dw.read = xen_pt_long_reg_read, |
Roger Pau Monne | a803633 | 2017-08-24 16:07:03 +0100 | [diff] [blame] | 1412 | .u.dw.write = xen_pt_mask_reg_write, |
Jan Beulich | 7611dae | 2015-06-02 15:07:00 +0000 | [diff] [blame] | 1413 | }, |
| 1414 | /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ |
| 1415 | { |
| 1416 | .offset = PCI_MSI_MASK_64, |
| 1417 | .size = 4, |
| 1418 | .init_val = 0x00000000, |
| 1419 | .ro_mask = 0xFFFFFFFF, |
| 1420 | .emu_mask = 0xFFFFFFFF, |
| 1421 | .init = xen_pt_mask_reg_init, |
| 1422 | .u.dw.read = xen_pt_long_reg_read, |
Roger Pau Monne | a803633 | 2017-08-24 16:07:03 +0100 | [diff] [blame] | 1423 | .u.dw.write = xen_pt_mask_reg_write, |
Jan Beulich | 7611dae | 2015-06-02 15:07:00 +0000 | [diff] [blame] | 1424 | }, |
| 1425 | /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ |
| 1426 | { |
| 1427 | .offset = PCI_MSI_MASK_32 + 4, |
| 1428 | .size = 4, |
| 1429 | .init_val = 0x00000000, |
| 1430 | .ro_mask = 0xFFFFFFFF, |
| 1431 | .emu_mask = 0x00000000, |
| 1432 | .init = xen_pt_pending_reg_init, |
| 1433 | .u.dw.read = xen_pt_long_reg_read, |
| 1434 | .u.dw.write = xen_pt_long_reg_write, |
| 1435 | }, |
| 1436 | /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ |
| 1437 | { |
| 1438 | .offset = PCI_MSI_MASK_64 + 4, |
| 1439 | .size = 4, |
| 1440 | .init_val = 0x00000000, |
| 1441 | .ro_mask = 0xFFFFFFFF, |
| 1442 | .emu_mask = 0x00000000, |
| 1443 | .init = xen_pt_pending_reg_init, |
| 1444 | .u.dw.read = xen_pt_long_reg_read, |
| 1445 | .u.dw.write = xen_pt_long_reg_write, |
| 1446 | }, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1447 | { |
| 1448 | .size = 0, |
| 1449 | }, |
| 1450 | }; |
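|      | /* Worked example: for the Message Address entry above, emu_mask is |
|      |  * 0xFFFFFFFF and ro_mask is 0x00000003, so a full-width guest write |
|      |  * (valid_mask 0xFFFFFFFF) gives writable_mask = 0xFFFFFFFC: every bit |
|      |  * except the two read-only low bits lands in the emulated register. |
|      |  */ |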
| 1451 | |
| 1452 | |
| 1453 | /************************************** |
| 1454 | * MSI-X Capability |
| 1455 | */ |
| 1456 | |
| 1457 | /* Message Control register for MSI-X */ |
| 1458 | static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s, |
| 1459 | XenPTRegInfo *reg, uint32_t real_offset, |
| 1460 | uint32_t *data) |
| 1461 | { |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1462 | uint16_t reg_field; |
| 1463 | int rc; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1464 | |
| 1465 | /* use I/O device register's value as initial value */ |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1466 | rc = xen_host_pci_get_word(&s->real_device, real_offset, ®_field); |
| 1467 | if (rc) { |
| 1468 | return rc; |
| 1469 | } |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1470 | if (reg_field & PCI_MSIX_FLAGS_ENABLE) { |
Konrad Rzeszutek Wilk | 54fd081 | 2015-06-29 16:06:19 -0400 | [diff] [blame] | 1471 | XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n"); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1472 | xen_host_pci_set_word(&s->real_device, real_offset, |
| 1473 | reg_field & ~PCI_MSIX_FLAGS_ENABLE); |
| 1474 | } |
| 1475 | |
| 1476 | s->msix->ctrl_offset = real_offset; |
| 1477 | |
| 1478 | *data = reg->init_val; |
| 1479 | return 0; |
| 1480 | } |
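|      | /* write Message Control register for MSI-X */ |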
| 1481 | static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s, |
| 1482 | XenPTReg *cfg_entry, uint16_t *val, |
| 1483 | uint16_t dev_value, uint16_t valid_mask) |
| 1484 | { |
| 1485 | XenPTRegInfo *reg = cfg_entry->reg; |
| 1486 | uint16_t writable_mask = 0; |
Jan Beulich | 0e7ef22 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1487 | uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1488 | int debug_msix_enabled_old; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1489 | uint16_t *data = cfg_entry->ptr.half_word; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1490 | |
| 1491 | /* modify emulated register */ |
| 1492 | writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1493 | *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1494 | |
| 1495 | /* create value for writing to I/O device register */ |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1496 | *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); |
| 1497 | |
| 1498 | /* update MSI-X */ |
| 1499 | if ((*val & PCI_MSIX_FLAGS_ENABLE) |
| 1500 | && !(*val & PCI_MSIX_FLAGS_MASKALL)) { |
| 1501 | xen_pt_msix_update(s); |
Zhenzhong Duan | c976437 | 2014-05-07 13:41:48 +0000 | [diff] [blame] | 1502 | } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) { |
| 1503 | xen_pt_msix_disable(s); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1504 | } |
| 1505 | |
Jan Beulich | f0ada36 | 2015-12-09 15:45:29 +0000 | [diff] [blame] | 1506 | s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL; |
| 1507 | |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1508 | debug_msix_enabled_old = s->msix->enabled; |
| 1509 | s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE); |
| 1510 | if (s->msix->enabled != debug_msix_enabled_old) { |
| 1511 | XEN_PT_LOG(&s->dev, "%s MSI-X\n", |
| 1512 | s->msix->enabled ? "enable" : "disable"); |
| 1513 | } |
| 1514 | |
| 1515 | return 0; |
| 1516 | } |
| 1517 | |
Stefan Weil | 0546b8c | 2012-08-10 22:03:25 +0200 | [diff] [blame] | 1518 | /* MSI-X Capability Structure reg static information table */ |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1519 | static XenPTRegInfo xen_pt_emu_reg_msix[] = { |
| 1520 | /* Next Pointer reg */ |
| 1521 | { |
| 1522 | .offset = PCI_CAP_LIST_NEXT, |
| 1523 | .size = 1, |
| 1524 | .init_val = 0x00, |
| 1525 | .ro_mask = 0xFF, |
| 1526 | .emu_mask = 0xFF, |
| 1527 | .init = xen_pt_ptr_reg_init, |
| 1528 | .u.b.read = xen_pt_byte_reg_read, |
| 1529 | .u.b.write = xen_pt_byte_reg_write, |
| 1530 | }, |
| 1531 | /* Message Control reg */ |
| 1532 | { |
| 1533 | .offset = PCI_MSI_FLAGS, |
| 1534 | .size = 2, |
| 1535 | .init_val = 0x0000, |
Jan Beulich | 0ad3393 | 2015-06-02 15:07:01 +0000 | [diff] [blame] | 1536 | .res_mask = 0x3800, |
| 1537 | .ro_mask = 0x07FF, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1538 | .emu_mask = 0x0000, |
| 1539 | .init = xen_pt_msixctrl_reg_init, |
| 1540 | .u.w.read = xen_pt_word_reg_read, |
| 1541 | .u.w.write = xen_pt_msixctrl_reg_write, |
| 1542 | }, |
| 1543 | { |
| 1544 | .size = 0, |
| 1545 | }, |
| 1546 | }; |
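|      | /* Note: only the Next Pointer and Message Control registers of MSI-X are |
|      |  * emulated here; the vector table and PBA live in MMIO and are handled |
|      |  * separately (see xen_pt_msix_init()/xen_pt_msix_update()/ |
|      |  * xen_pt_msix_disable() used above). |
|      |  */ |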
| 1547 | |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 1548 | static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = { |
| 1549 | /* Intel IGFX OpRegion reg */ |
| 1550 | { |
| 1551 | .offset = 0x0, |
| 1552 | .size = 4, |
| 1553 | .init_val = 0, |
Xiong Zhang | a19bae4 | 2017-07-07 12:07:58 +0800 | [diff] [blame] | 1554 | .emu_mask = 0xFFFFFFFF, |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 1555 | .u.dw.read = xen_pt_intel_opregion_read, |
| 1556 | .u.dw.write = xen_pt_intel_opregion_write, |
| 1557 | }, |
| 1558 | { |
| 1559 | .size = 0, |
| 1560 | }, |
| 1561 | }; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1562 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1563 | /**************************** |
| 1564 | * Capabilities |
| 1565 | */ |
| 1566 | |
| 1567 | /* capability structure register group size functions */ |
| 1568 | |
| 1569 | static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s, |
| 1570 | const XenPTRegGroupInfo *grp_reg, |
| 1571 | uint32_t base_offset, uint8_t *size) |
| 1572 | { |
| 1573 | *size = grp_reg->grp_size; |
| 1574 | return 0; |
| 1575 | } |
| 1576 | /* get Vendor Specific Capability Structure register group size */ |
| 1577 | static int xen_pt_vendor_size_init(XenPCIPassthroughState *s, |
| 1578 | const XenPTRegGroupInfo *grp_reg, |
| 1579 | uint32_t base_offset, uint8_t *size) |
| 1580 | { |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1581 | return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size); |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1582 | } |
| 1583 | /* get PCI Express Capability Structure register group size */ |
| 1584 | static int xen_pt_pcie_size_init(XenPCIPassthroughState *s, |
| 1585 | const XenPTRegGroupInfo *grp_reg, |
| 1586 | uint32_t base_offset, uint8_t *size) |
| 1587 | { |
| 1588 | PCIDevice *d = &s->dev; |
| 1589 | uint8_t version = get_capability_version(s, base_offset); |
| 1590 | uint8_t type = get_device_type(s, base_offset); |
| 1591 | uint8_t pcie_size = 0; |
| 1592 | |
| 1593 | |
| 1594 | /* calculate size depending on capability version and device/port type */ |
| 1595 | /* in case of PCI Express Base Specification Rev 1.x */ |
| 1596 | if (version == 1) { |
| 1597 | /* The PCI Express Capabilities, Device Capabilities, and Device |
| 1598 | * Status/Control registers are required for all PCI Express devices. |
| 1599 | * The Link Capabilities and Link Status/Control are required for all |
| 1600 | * Endpoints that are not Root Complex Integrated Endpoints. Endpoints |
| 1601 | * are not required to implement registers other than those listed |
| 1602 | * above and terminate the capability structure. |
| 1603 | */ |
| 1604 | switch (type) { |
| 1605 | case PCI_EXP_TYPE_ENDPOINT: |
| 1606 | case PCI_EXP_TYPE_LEG_END: |
| 1607 | pcie_size = 0x14; |
| 1608 | break; |
| 1609 | case PCI_EXP_TYPE_RC_END: |
| 1610 | /* has no link */ |
| 1611 | pcie_size = 0x0C; |
| 1612 | break; |
| 1613 | /* only EndPoint passthrough is supported */ |
| 1614 | case PCI_EXP_TYPE_ROOT_PORT: |
| 1615 | case PCI_EXP_TYPE_UPSTREAM: |
| 1616 | case PCI_EXP_TYPE_DOWNSTREAM: |
| 1617 | case PCI_EXP_TYPE_PCI_BRIDGE: |
| 1618 | case PCI_EXP_TYPE_PCIE_BRIDGE: |
| 1619 | case PCI_EXP_TYPE_RC_EC: |
| 1620 | default: |
| 1621 | XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type); |
| 1622 | return -1; |
| 1623 | } |
| 1624 | } |
| 1625 | /* in case of PCI Express Base Specification Rev 2.0 */ |
| 1626 | else if (version == 2) { |
| 1627 | switch (type) { |
| 1628 | case PCI_EXP_TYPE_ENDPOINT: |
| 1629 | case PCI_EXP_TYPE_LEG_END: |
| 1630 | case PCI_EXP_TYPE_RC_END: |
| 1631 | /* For Functions that do not implement the registers, |
| 1632 | * these spaces must be hardwired to 0b. |
| 1633 | */ |
| 1634 | pcie_size = 0x3C; |
| 1635 | break; |
| 1636 | /* only EndPoint passthrough is supported */ |
| 1637 | case PCI_EXP_TYPE_ROOT_PORT: |
| 1638 | case PCI_EXP_TYPE_UPSTREAM: |
| 1639 | case PCI_EXP_TYPE_DOWNSTREAM: |
| 1640 | case PCI_EXP_TYPE_PCI_BRIDGE: |
| 1641 | case PCI_EXP_TYPE_PCIE_BRIDGE: |
| 1642 | case PCI_EXP_TYPE_RC_EC: |
| 1643 | default: |
| 1644 | XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type); |
| 1645 | return -1; |
| 1646 | } |
| 1647 | } else { |
| 1648 | XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version); |
| 1649 | return -1; |
| 1650 | } |
| 1651 | |
| 1652 | *size = pcie_size; |
| 1653 | return 0; |
| 1654 | } |
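|      | /* Net effect: v1 (Legacy) Endpoints get a 0x14-byte group, v1 Root Complex |
|      |  * Integrated Endpoints 0x0C bytes, and any supported v2 endpoint type |
|      |  * 0x3C bytes; all other port types are rejected. */ |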
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1655 | /* get MSI Capability Structure register group size */ |
| 1656 | static int xen_pt_msi_size_init(XenPCIPassthroughState *s, |
| 1657 | const XenPTRegGroupInfo *grp_reg, |
| 1658 | uint32_t base_offset, uint8_t *size) |
| 1659 | { |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1660 | uint16_t msg_ctrl = 0; |
| 1661 | uint8_t msi_size = 0xa; |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1662 | int rc; |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1663 | |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1664 | rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS, |
| 1665 | &msg_ctrl); |
| 1666 | if (rc) { |
| 1667 | return rc; |
| 1668 | } |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1669 | /* account for the optional 64-bit address and per-vector masking registers */ |
| 1670 | if (msg_ctrl & PCI_MSI_FLAGS_64BIT) { |
| 1671 | msi_size += 4; |
| 1672 | } |
| 1673 | if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) { |
| 1674 | msi_size += 10; |
| 1675 | } |
| 1676 | |
| 1677 | s->msi = g_new0(XenPTMSI, 1); |
| 1678 | s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ; |
| 1679 | |
| 1680 | *size = msi_size; |
| 1681 | return 0; |
| 1682 | } |
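|      | /* For example, a function advertising both PCI_MSI_FLAGS_64BIT and |
|      |  * PCI_MSI_FLAGS_MASKBIT ends up with 0x0a + 4 + 10 = 0x18 bytes. */ |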
| 1683 | /* get MSI-X Capability Structure register group size */ |
| 1684 | static int xen_pt_msix_size_init(XenPCIPassthroughState *s, |
| 1685 | const XenPTRegGroupInfo *grp_reg, |
| 1686 | uint32_t base_offset, uint8_t *size) |
| 1687 | { |
| 1688 | int rc = 0; |
| 1689 | |
| 1690 | rc = xen_pt_msix_init(s, base_offset); |
| 1691 | |
| 1692 | if (rc < 0) { |
| 1693 | XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n"); |
| 1694 | return rc; |
| 1695 | } |
| 1696 | |
| 1697 | *size = grp_reg->grp_size; |
| 1698 | return 0; |
| 1699 | } |
| 1700 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1701 | |
| 1702 | static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = { |
| 1703 | /* Header Type0 reg group */ |
| 1704 | { |
| 1705 | .grp_id = 0xFF, |
| 1706 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1707 | .grp_size = 0x40, |
| 1708 | .size_init = xen_pt_reg_grp_size_init, |
| 1709 | .emu_regs = xen_pt_emu_reg_header0, |
| 1710 | }, |
| 1711 | /* PCI PowerManagement Capability reg group */ |
| 1712 | { |
| 1713 | .grp_id = PCI_CAP_ID_PM, |
| 1714 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1715 | .grp_size = PCI_PM_SIZEOF, |
| 1716 | .size_init = xen_pt_reg_grp_size_init, |
| 1717 | .emu_regs = xen_pt_emu_reg_pm, |
| 1718 | }, |
| 1719 | /* AGP Capability Structure reg group */ |
| 1720 | { |
| 1721 | .grp_id = PCI_CAP_ID_AGP, |
| 1722 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1723 | .grp_size = 0x30, |
| 1724 | .size_init = xen_pt_reg_grp_size_init, |
| 1725 | }, |
| 1726 | /* Vital Product Data Capability Structure reg group */ |
| 1727 | { |
| 1728 | .grp_id = PCI_CAP_ID_VPD, |
| 1729 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1730 | .grp_size = 0x08, |
| 1731 | .size_init = xen_pt_reg_grp_size_init, |
| 1732 | .emu_regs = xen_pt_emu_reg_vpd, |
| 1733 | }, |
| 1734 | /* Slot Identification reg group */ |
| 1735 | { |
| 1736 | .grp_id = PCI_CAP_ID_SLOTID, |
| 1737 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1738 | .grp_size = 0x04, |
| 1739 | .size_init = xen_pt_reg_grp_size_init, |
| 1740 | }, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1741 | /* MSI Capability Structure reg group */ |
| 1742 | { |
| 1743 | .grp_id = PCI_CAP_ID_MSI, |
| 1744 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1745 | .grp_size = 0xFF, |
| 1746 | .size_init = xen_pt_msi_size_init, |
| 1747 | .emu_regs = xen_pt_emu_reg_msi, |
| 1748 | }, |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1749 | /* PCI-X Capabilities List Item reg group */ |
| 1750 | { |
| 1751 | .grp_id = PCI_CAP_ID_PCIX, |
| 1752 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1753 | .grp_size = 0x18, |
| 1754 | .size_init = xen_pt_reg_grp_size_init, |
| 1755 | }, |
| 1756 | /* Vendor Specific Capability Structure reg group */ |
| 1757 | { |
| 1758 | .grp_id = PCI_CAP_ID_VNDR, |
| 1759 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1760 | .grp_size = 0xFF, |
| 1761 | .size_init = xen_pt_vendor_size_init, |
| 1762 | .emu_regs = xen_pt_emu_reg_vendor, |
| 1763 | }, |
| 1764 | /* SHPC Capability List Item reg group */ |
| 1765 | { |
| 1766 | .grp_id = PCI_CAP_ID_SHPC, |
| 1767 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1768 | .grp_size = 0x08, |
| 1769 | .size_init = xen_pt_reg_grp_size_init, |
| 1770 | }, |
| 1771 | /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */ |
| 1772 | { |
| 1773 | .grp_id = PCI_CAP_ID_SSVID, |
| 1774 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1775 | .grp_size = 0x08, |
| 1776 | .size_init = xen_pt_reg_grp_size_init, |
| 1777 | }, |
| 1778 | /* AGP 8x Capability Structure reg group */ |
| 1779 | { |
| 1780 | .grp_id = PCI_CAP_ID_AGP3, |
| 1781 | .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, |
| 1782 | .grp_size = 0x30, |
| 1783 | .size_init = xen_pt_reg_grp_size_init, |
| 1784 | }, |
| 1785 | /* PCI Express Capability Structure reg group */ |
| 1786 | { |
| 1787 | .grp_id = PCI_CAP_ID_EXP, |
| 1788 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1789 | .grp_size = 0xFF, |
| 1790 | .size_init = xen_pt_pcie_size_init, |
| 1791 | .emu_regs = xen_pt_emu_reg_pcie, |
| 1792 | }, |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 1793 | /* MSI-X Capability Structure reg group */ |
| 1794 | { |
| 1795 | .grp_id = PCI_CAP_ID_MSIX, |
| 1796 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1797 | .grp_size = 0x0C, |
| 1798 | .size_init = xen_pt_msix_size_init, |
| 1799 | .emu_regs = xen_pt_emu_reg_msix, |
| 1800 | }, |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 1801 | /* Intel IGD Opregion group */ |
| 1802 | { |
| 1803 | .grp_id = XEN_PCI_INTEL_OPREGION, |
| 1804 | .grp_type = XEN_PT_GRP_TYPE_EMU, |
| 1805 | .grp_size = 0x4, |
| 1806 | .size_init = xen_pt_reg_grp_size_init, |
| 1807 | .emu_regs = xen_pt_emu_reg_igd_opregion, |
| 1808 | }, |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1809 | { |
| 1810 | .grp_size = 0, |
| 1811 | }, |
| 1812 | }; |
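|      | /* |
|      |  * A grp_size of 0xFF above is a placeholder: the real size is determined |
|      |  * at runtime by the group's size_init callback.  HARDWIRED groups carry |
|      |  * no emu_regs and are hidden from the guest's capability chain. |
|      |  */ |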
| 1813 | |
| 1814 | /* initialize Capabilities Pointer or Next Pointer register */ |
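|      | /* Because the Next Pointer register is emulated, the capability chain the |
|      |  * guest sees skips groups that are hidden (xen_pt_hide_dev_cap) or |
|      |  * hardwired, even though they remain present on the physical device. */ |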
| 1815 | static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, |
| 1816 | XenPTRegInfo *reg, uint32_t real_offset, |
| 1817 | uint32_t *data) |
| 1818 | { |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1819 | int i, rc; |
| 1820 | uint8_t reg_field; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1821 | uint8_t cap_id = 0; |
| 1822 | |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1823 | rc = xen_host_pci_get_byte(&s->real_device, real_offset, ®_field); |
| 1824 | if (rc) { |
| 1825 | return rc; |
| 1826 | } |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1827 | /* find capability offset */ |
| 1828 | while (reg_field) { |
| 1829 | for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { |
| 1830 | if (xen_pt_hide_dev_cap(&s->real_device, |
| 1831 | xen_pt_emu_reg_grps[i].grp_id)) { |
| 1832 | continue; |
| 1833 | } |
| 1834 | |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1835 | rc = xen_host_pci_get_byte(&s->real_device, |
| 1836 | reg_field + PCI_CAP_LIST_ID, &cap_id); |
| 1837 | if (rc) { |
Konrad Rzeszutek Wilk | ea6c50f | 2015-06-24 17:18:26 -0400 | [diff] [blame] | 1838 | XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n", |
| 1839 | reg_field + PCI_CAP_LIST_ID, rc); |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1840 | return rc; |
| 1841 | } |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1842 | if (xen_pt_emu_reg_grps[i].grp_id == cap_id) { |
| 1843 | if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { |
| 1844 | goto out; |
| 1845 | } |
| 1846 | /* this capability is hardwired to 0 (hidden from the guest); look for the next one */ |
| 1847 | break; |
| 1848 | } |
| 1849 | } |
| 1850 | |
| 1851 | /* next capability */ |
Konrad Rzeszutek Wilk | 6aa07b1 | 2015-06-29 14:01:13 -0400 | [diff] [blame] | 1852 | rc = xen_host_pci_get_byte(&s->real_device, |
| 1853 | reg_field + PCI_CAP_LIST_NEXT, ®_field); |
| 1854 | if (rc) { |
| 1855 | return rc; |
| 1856 | } |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1857 | } |
| 1858 | |
| 1859 | out: |
| 1860 | *data = reg_field; |
| 1861 | return 0; |
| 1862 | } |
| 1863 | |
| 1864 | |
| 1865 | /************* |
| 1866 | * Main |
| 1867 | */ |
| 1868 | |
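|      | /* Walk the physical device's standard capability list (at most |
|      |  * XEN_PCI_CAP_MAX links) and return the config-space offset of |
|      |  * capability 'cap', or 0 if it is not present. */ |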
| 1869 | static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap) |
| 1870 | { |
| 1871 | uint8_t id; |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 1872 | unsigned max_cap = XEN_PCI_CAP_MAX; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1873 | uint8_t pos = PCI_CAPABILITY_LIST; |
| 1874 | uint8_t status = 0; |
| 1875 | |
| 1876 | if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) { |
| 1877 | return 0; |
| 1878 | } |
| 1879 | if ((status & PCI_STATUS_CAP_LIST) == 0) { |
| 1880 | return 0; |
| 1881 | } |
| 1882 | |
| 1883 | while (max_cap--) { |
| 1884 | if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) { |
| 1885 | break; |
| 1886 | } |
| 1887 | if (pos < PCI_CONFIG_HEADER_SIZE) { |
| 1888 | break; |
| 1889 | } |
| 1890 | |
| 1891 | pos &= ~3; |
| 1892 | if (xen_host_pci_get_byte(&s->real_device, |
| 1893 | pos + PCI_CAP_LIST_ID, &id)) { |
| 1894 | break; |
| 1895 | } |
| 1896 | |
| 1897 | if (id == 0xff) { |
| 1898 | break; |
| 1899 | } |
| 1900 | if (id == cap) { |
| 1901 | return pos; |
| 1902 | } |
| 1903 | |
| 1904 | pos += PCI_CAP_LIST_NEXT; |
| 1905 | } |
| 1906 | return 0; |
| 1907 | } |
| 1908 | |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1909 | static void xen_pt_config_reg_init(XenPCIPassthroughState *s, |
| 1910 | XenPTRegGroup *reg_grp, XenPTRegInfo *reg, |
| 1911 | Error **errp) |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1912 | { |
| 1913 | XenPTReg *reg_entry; |
| 1914 | uint32_t data = 0; |
| 1915 | int rc = 0; |
| 1916 | |
| 1917 | reg_entry = g_new0(XenPTReg, 1); |
| 1918 | reg_entry->reg = reg; |
| 1919 | |
| 1920 | if (reg->init) { |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1921 | uint32_t host_mask, size_mask; |
| 1922 | unsigned int offset; |
| 1923 | uint32_t val; |
| 1924 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1925 | /* initialize emulated register */ |
| 1926 | rc = reg->init(s, reg_entry->reg, |
| 1927 | reg_grp->base_offset + reg->offset, &data); |
| 1928 | if (rc < 0) { |
Stefan Weil | c5633d9 | 2013-06-10 22:36:22 +0200 | [diff] [blame] | 1929 | g_free(reg_entry); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1930 | error_setg(errp, "Init emulate register fail"); |
| 1931 | return; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1932 | } |
| 1933 | if (data == XEN_PT_INVALID_REG) { |
| 1934 | /* register is not emulated; free the unused entry */ |
Stefan Weil | c5633d9 | 2013-06-10 22:36:22 +0200 | [diff] [blame] | 1935 | g_free(reg_entry); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1936 | return; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 1937 | } |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1938 | /* Sync up the data to dev.config */ |
| 1939 | offset = reg_grp->base_offset + reg->offset; |
| 1940 | size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3); |
| 1941 | |
| 1942 | switch (reg->size) { |
| 1943 | case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val); |
| 1944 | break; |
| 1945 | case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val); |
| 1946 | break; |
| 1947 | case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val); |
| 1948 | break; |
Paolo Bonzini | 2c21ec3 | 2015-10-19 16:08:39 +0200 | [diff] [blame] | 1949 | default: abort(); |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1950 | } |
| 1951 | if (rc) { |
| 1952 | /* Serious issues when we cannot read the host values! */ |
| 1953 | g_free(reg_entry); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1954 | error_setg(errp, "Cannot read host values"); |
| 1955 | return; |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1956 | } |
| 1957 | /* Bits set in emu_mask are the ones we emulate. dev.config must hold |
| 1958 |  * the guest's emulated view, so flip the mask to pick out the host |
| 1959 |  * values (which dev.config initially contains). */ |
| 1960 | host_mask = size_mask & ~reg->emu_mask; |
| 1961 | |
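|      | /* e.g. the 2-byte MSI Message Control register (emu_mask 0x017E) gives |
|      |  * size_mask 0xFFFF and host_mask 0xFE81: those are the bits dev.config |
|      |  * keeps from the physical device. */ |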
| 1962 | if ((data & host_mask) != (val & host_mask)) { |
| 1963 | uint32_t new_val; |
| 1964 | |
| 1965 | /* Mask out host (including past size). */ |
| 1966 | new_val = val & host_mask; |
| 1967 | /* Merge emulated ones (excluding the non-emulated ones). */ |
| 1968 | new_val |= data & host_mask; |
| 1969 | /* Keep the host and emulated values past the size intact - we do not care |
| 1970 |  * since writes happen at reg->size granularity, but the logging below |
| 1971 |  * should show the proper value. */ |
| 1972 | new_val |= ((val | data)) & ~size_mask; |
| 1973 | XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n", |
| 1974 | offset, data, val, new_val); |
| 1975 | val = new_val; |
| 1976 | } else { |
| 1977 | val = data; |
|      | } |
| 1978 | |
Konrad Rzeszutek Wilk | 5b4dd0f | 2015-06-29 16:41:14 -0400 | [diff] [blame] | 1979 | if (val & ~size_mask) { |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1980 | error_setg(errp, "Offset 0x%04x:0x%04x expands past" |
| 1981 | " register size (%d)", offset, val, reg->size); |
Konrad Rzeszutek Wilk | 5b4dd0f | 2015-06-29 16:41:14 -0400 | [diff] [blame] | 1982 | g_free(reg_entry); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 1983 | return; |
Konrad Rzeszutek Wilk | 5b4dd0f | 2015-06-29 16:41:14 -0400 | [diff] [blame] | 1984 | } |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1985 | /* This could be just pci_set_long as we don't modify the bits |
Konrad Rzeszutek Wilk | 5b4dd0f | 2015-06-29 16:41:14 -0400 | [diff] [blame] | 1986 | * past reg->size, but in case this routine is run in parallel or the |
| 1987 | * init value is larger, we do not want to overwrite registers. */ |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1988 | switch (reg->size) { |
| 1989 | case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val); |
| 1990 | break; |
| 1991 | case 2: pci_set_word(s->dev.config + offset, (uint16_t)val); |
| 1992 | break; |
| 1993 | case 4: pci_set_long(s->dev.config + offset, val); |
| 1994 | break; |
Paolo Bonzini | 2c21ec3 | 2015-10-19 16:08:39 +0200 | [diff] [blame] | 1995 | default: abort(); |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1996 | } |
Konrad Rzeszutek Wilk | e2779de | 2015-07-01 15:41:33 -0400 | [diff] [blame] | 1997 | /* set register value pointer to the data. */ |
| 1998 | reg_entry->ptr.byte = s->dev.config + offset; |
Konrad Rzeszutek Wilk | 2e87512 | 2015-06-29 16:24:40 -0400 | [diff] [blame] | 1999 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2000 | } |
| 2001 | /* add the register entry to the list */ |
| 2002 | QLIST_INSERT_HEAD(®_grp->reg_tbl_list, reg_entry, entries); |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2003 | } |
| 2004 | |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2005 | void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp) |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2006 | { |
| 2007 | int i, rc; |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2008 | Error *err = NULL; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2009 | |
| 2010 | QLIST_INIT(&s->reg_grps); |
| 2011 | |
| 2012 | for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { |
| 2013 | uint32_t reg_grp_offset = 0; |
| 2014 | XenPTRegGroup *reg_grp_entry = NULL; |
| 2015 | |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 2016 | if (xen_pt_emu_reg_grps[i].grp_id != 0xFF |
| 2017 | && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) { |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2018 | if (xen_pt_hide_dev_cap(&s->real_device, |
| 2019 | xen_pt_emu_reg_grps[i].grp_id)) { |
| 2020 | continue; |
| 2021 | } |
| 2022 | |
| 2023 | reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id); |
| 2024 | |
| 2025 | if (!reg_grp_offset) { |
| 2026 | continue; |
| 2027 | } |
| 2028 | } |
| 2029 | |
Tiejun Chen | 5cec8aa | 2015-07-15 13:37:50 +0800 | [diff] [blame] | 2030 | /* |
| 2031 | * By default we only trap up to 0x40 of the config space. |
| 2032 | * If an Intel IGD is passed through we also need to trap offset 0xfc, |
| 2033 | * so the covered size must be 0xff. |
| 2034 | */ |
| 2035 | if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) { |
| 2036 | reg_grp_offset = XEN_PCI_INTEL_OPREGION; |
| 2037 | } |
| 2038 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2039 | reg_grp_entry = g_new0(XenPTRegGroup, 1); |
| 2040 | QLIST_INIT(®_grp_entry->reg_tbl_list); |
| 2041 | QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); |
| 2042 | |
| 2043 | reg_grp_entry->base_offset = reg_grp_offset; |
| 2044 | reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; |
| 2045 | if (xen_pt_emu_reg_grps[i].size_init) { |
| 2046 | /* get register group size */ |
| 2047 | rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, |
| 2048 | reg_grp_offset, |
| 2049 | ®_grp_entry->size); |
| 2050 | if (rc < 0) { |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2051 | error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x," |
| 2052 | " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps), |
Konrad Rzeszutek Wilk | ea6c50f | 2015-06-24 17:18:26 -0400 | [diff] [blame] | 2053 | xen_pt_emu_reg_grps[i].grp_type, rc); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2054 | error_propagate(errp, err); |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2055 | xen_pt_config_delete(s); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2056 | return; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2057 | } |
| 2058 | } |
| 2059 | |
| 2060 | if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { |
| 2061 | if (xen_pt_emu_reg_grps[i].emu_regs) { |
| 2062 | int j = 0; |
| 2063 | XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2064 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2065 | /* initialize capability register */ |
| 2066 | for (j = 0; regs->size != 0; j++, regs++) { |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2067 | xen_pt_config_reg_init(s, reg_grp_entry, regs, &err); |
| 2068 | if (err) { |
Cao jin | c4f68f0 | 2016-01-25 20:16:03 +0800 | [diff] [blame] | 2069 | error_append_hint(&err, "Failed to init register %d" |
| 2070 | " offset 0x%x in grp_type = 0x%x (%d/%zu)", j, |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2071 | regs->offset, xen_pt_emu_reg_grps[i].grp_type, |
| 2072 | i, ARRAY_SIZE(xen_pt_emu_reg_grps)); |
| 2073 | error_propagate(errp, err); |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2074 | xen_pt_config_delete(s); |
Cao jin | d50a6e5 | 2016-01-17 20:13:14 +0800 | [diff] [blame] | 2075 | return; |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2076 | } |
| 2077 | } |
| 2078 | } |
| 2079 | } |
| 2080 | } |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2081 | } |
| 2082 | |
| 2083 | /* delete all emulated registers */ |
| 2084 | void xen_pt_config_delete(XenPCIPassthroughState *s) |
| 2085 | { |
| 2086 | struct XenPTRegGroup *reg_group, *next_grp; |
| 2087 | struct XenPTReg *reg, *next_reg; |
| 2088 | |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 2089 | /* free MSI/MSI-X info table */ |
| 2090 | if (s->msix) { |
Lan Tianyu | 4e494de | 2015-10-11 23:19:24 +0800 | [diff] [blame] | 2091 | xen_pt_msix_unmap(s); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 2092 | } |
Daniel P. Berrange | ef1e1e0 | 2015-08-26 12:17:18 +0100 | [diff] [blame] | 2093 | g_free(s->msi); |
Jiang Yunhong | 3854ca5 | 2012-06-21 15:42:35 +0000 | [diff] [blame] | 2094 | |
Allen Kay | 93d7ae8 | 2012-06-21 15:40:48 +0000 | [diff] [blame] | 2095 | /* free all register group entry */ |
| 2096 | QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { |
| 2097 | /* free all register entry */ |
| 2098 | QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { |
| 2099 | QLIST_REMOVE(reg, entries); |
| 2100 | g_free(reg); |
| 2101 | } |
| 2102 | |
| 2103 | QLIST_REMOVE(reg_group, entries); |
| 2104 | g_free(reg_group); |
| 2105 | } |
| 2106 | } |