// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI config space virtualization
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

/*
 * This code handles reading and writing of PCI configuration registers.
 * This is hairy because we want to allow a lot of flexibility to the
 * user driver, but cannot trust it with all of the config fields.
 * Tables determine which fields can be read and written, as well as
 * which fields are 'virtualized' - special actions and translations to
 * make it appear to the user that he has control, when in fact things
 * must be negotiated with the underlying OS.
 */

#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC	0

#define is_bar(offset)	\
	((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
	 (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))

/*
 * Lengths of PCI Config Capabilities
 *   0: Removed from the user visible capability list
 *   FF: Variable length
 */
static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
	[PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */
	[PCI_CAP_ID_PM] = PCI_PM_SIZEOF,
	[PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF,
	[PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF,
	[PCI_CAP_ID_SLOTID] = 0,	/* bridge - don't care */
	[PCI_CAP_ID_MSI] = 0xFF,	/* 10, 14, 20, or 24 */
	[PCI_CAP_ID_CHSWP] = 0,		/* cpci - not yet */
	[PCI_CAP_ID_PCIX] = 0xFF,	/* 8 or 24 */
	[PCI_CAP_ID_HT] = 0xFF,		/* hypertransport */
	[PCI_CAP_ID_VNDR] = 0xFF,	/* variable */
	[PCI_CAP_ID_DBG] = 0,		/* debug - don't care */
	[PCI_CAP_ID_CCRC] = 0,		/* cpci - not yet */
	[PCI_CAP_ID_SHPC] = 0,		/* hotswap - not yet */
	[PCI_CAP_ID_SSVID] = 0,		/* bridge - don't care */
	[PCI_CAP_ID_AGP3] = 0,		/* AGP8x - not yet */
	[PCI_CAP_ID_SECDEV] = 0,	/* secure device not yet */
	[PCI_CAP_ID_EXP] = 0xFF,	/* 20 or 44 */
	[PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF,
	[PCI_CAP_ID_SATA] = 0xFF,
	[PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF,
};

/*
 * Lengths of PCIe/PCI-X Extended Config Capabilities
 *   0: Removed or masked from the user visible capability list
 *   FF: Variable length
 */
static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
	[PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND,
	[PCI_EXT_CAP_ID_VC] = 0xFF,
	[PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF,
	[PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF,
	[PCI_EXT_CAP_ID_RCLD] = 0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_RCILC] = 0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_RCEC] = 0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_MFVC] = 0xFF,
	[PCI_EXT_CAP_ID_VC9] = 0xFF,	/* same as CAP_ID_VC */
	[PCI_EXT_CAP_ID_RCRB] = 0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_VNDR] = 0xFF,
	[PCI_EXT_CAP_ID_CAC] = 0,	/* obsolete */
	[PCI_EXT_CAP_ID_ACS] = 0xFF,
	[PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF,
	[PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF,
	[PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF,
	[PCI_EXT_CAP_ID_MRIOV] = 0,	/* not yet */
	[PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
	[PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF,
	[PCI_EXT_CAP_ID_AMD_XXX] = 0,	/* not yet */
	[PCI_EXT_CAP_ID_REBAR] = 0xFF,
	[PCI_EXT_CAP_ID_DPA] = 0xFF,
	[PCI_EXT_CAP_ID_TPH] = 0xFF,
	[PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF,
	[PCI_EXT_CAP_ID_SECPCI] = 0,	/* not yet */
	[PCI_EXT_CAP_ID_PMUX] = 0,	/* not yet */
	[PCI_EXT_CAP_ID_PASID] = 0,	/* not yet */
	[PCI_EXT_CAP_ID_DVSEC] = 0xFF,
};

/*
 * Read/Write Permission Bits - one bit for each bit in capability
 * Any field can be read if it exists, but what is read depends on
 * whether the field is 'virtualized', or just pass through to the
 * hardware. Any virtualized field is also virtualized for writes.
 * Writes are only permitted if they have a 1 bit here.
 */
struct perm_bits {
	u8 *virt;	/* read/write virtual data, not hw */
	u8 *write;	/* writeable bits */
	int (*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
		      struct perm_bits *perm, int offset, __le32 *val);
	int (*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
		       struct perm_bits *perm, int offset, __le32 val);
};

#define NO_VIRT		0
#define ALL_VIRT	0xFFFFFFFFU
#define NO_WRITE	0
#define ALL_WRITE	0xFFFFFFFFU
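
/*
 * Illustrative example (not part of the driver logic): setting up the
 * command word with
 *
 *	p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
 *
 * makes every bit user-writable, but only INTx disable virtualized:
 * reads return hardware state for all other bits, while INTx disable
 * is tracked in vconfig so it can be emulated.
 */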

static int vfio_user_config_read(struct pci_dev *pdev, int offset,
				 __le32 *val, int count)
{
	int ret = -EINVAL;
	u32 tmp_val = 0;

	switch (count) {
	case 1:
	{
		u8 tmp;
		ret = pci_user_read_config_byte(pdev, offset, &tmp);
		tmp_val = tmp;
		break;
	}
	case 2:
	{
		u16 tmp;
		ret = pci_user_read_config_word(pdev, offset, &tmp);
		tmp_val = tmp;
		break;
	}
	case 4:
		ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
		break;
	}

	*val = cpu_to_le32(tmp_val);

	return ret;
}

static int vfio_user_config_write(struct pci_dev *pdev, int offset,
				  __le32 val, int count)
{
	int ret = -EINVAL;
	u32 tmp_val = le32_to_cpu(val);

	switch (count) {
	case 1:
		ret = pci_user_write_config_byte(pdev, offset, tmp_val);
		break;
	case 2:
		ret = pci_user_write_config_word(pdev, offset, tmp_val);
		break;
	case 4:
		ret = pci_user_write_config_dword(pdev, offset, tmp_val);
		break;
	}

	return ret;
}

static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
				    int count, struct perm_bits *perm,
				    int offset, __le32 *val)
{
	__le32 virt = 0;

	memcpy(val, vdev->vconfig + pos, count);

	memcpy(&virt, perm->virt + offset, count);

	/* Any non-virtualized bits? */
	if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
		struct pci_dev *pdev = vdev->pdev;
		__le32 phys_val = 0;
		int ret;

		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
		if (ret)
			return ret;

		*val = (phys_val & ~virt) | (*val & virt);
	}

	return count;
}
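
/*
 * Worked example of the merge above: for a 2-byte read of PCI_COMMAND
 * with only INTx disable virtualized, virt holds le16 0x0400, which is
 * not equal to the full 0xFFFF mask, so hardware is read and the
 * result is (phys_val & ~0x0400) | (vconfig & 0x0400) - real hardware
 * bits with the emulated INTx disable bit patched in.
 */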

static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
				     int count, struct perm_bits *perm,
				     int offset, __le32 val)
{
	__le32 virt = 0, write = 0;

	memcpy(&write, perm->write + offset, count);

	if (!write)
		return count; /* drop, no writable bits */

	memcpy(&virt, perm->virt + offset, count);

	/* Virtualized and writable bits go to vconfig */
	if (write & virt) {
		__le32 virt_val = 0;

		memcpy(&virt_val, vdev->vconfig + pos, count);

		virt_val &= ~(write & virt);
		virt_val |= (val & (write & virt));

		memcpy(vdev->vconfig + pos, &virt_val, count);
	}

	/* Non-virtualized and writable bits go to hardware */
	if (write & ~virt) {
		struct pci_dev *pdev = vdev->pdev;
		__le32 phys_val = 0;
		int ret;

		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
		if (ret)
			return ret;

		phys_val &= ~(write & ~virt);
		phys_val |= (val & (write & ~virt));

		ret = vfio_user_config_write(pdev, pos, phys_val, count);
		if (ret)
			return ret;
	}

	return count;
}

/* Allow direct read from hardware, except for capability next pointer */
static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
				   int count, struct perm_bits *perm,
				   int offset, __le32 *val)
{
	int ret;

	ret = vfio_user_config_read(vdev->pdev, pos, val, count);
	if (ret)
		return ret;

	if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
		if (offset < 4)
			memcpy(val, vdev->vconfig + pos, count);
	} else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
		if (offset == PCI_CAP_LIST_ID && count > 1)
			memcpy(val, vdev->vconfig + pos,
			       min(PCI_CAP_FLAGS, count));
		else if (offset == PCI_CAP_LIST_NEXT)
			memcpy(val, vdev->vconfig + pos, 1);
	}

	return count;
}

/* Raw access skips any kind of virtualization */
static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	int ret;

	ret = vfio_user_config_write(vdev->pdev, pos, val, count);
	if (ret)
		return ret;

	return count;
}

static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 *val)
{
	int ret;

	ret = vfio_user_config_read(vdev->pdev, pos, val, count);
	if (ret)
		return ret;

	return count;
}

/* Virt access uses only virtualization */
static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
				  int count, struct perm_bits *perm,
				  int offset, __le32 val)
{
	memcpy(vdev->vconfig + pos, &val, count);
	return count;
}

static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 *val)
{
	memcpy(val, vdev->vconfig + pos, count);
	return count;
}

/* Default capability regions to read-only, no-virtualization */
static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
	[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
	[0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
/*
 * Default unassigned regions to raw read-write access. Some devices
 * require this to function as they hide registers between the gaps in
 * config space (be2net). Like MMIO and I/O port registers, we have
 * to trust the hardware isolation.
 */
static struct perm_bits unassigned_perms = {
	.readfn = vfio_raw_config_read,
	.writefn = vfio_raw_config_write
};

static struct perm_bits virt_perms = {
	.readfn = vfio_virt_config_read,
	.writefn = vfio_virt_config_write
};

static void free_perm_bits(struct perm_bits *perm)
{
	kfree(perm->virt);
	kfree(perm->write);
	perm->virt = NULL;
	perm->write = NULL;
}

static int alloc_perm_bits(struct perm_bits *perm, int size)
{
	/*
	 * Round up all permission bits to the next dword, this lets us
	 * ignore whether a read/write exceeds the defined capability
	 * structure. We can do this because:
	 *  - Standard config space is already dword aligned
	 *  - Capabilities are all dword aligned (bits 0:1 of next reserved)
	 *  - Express capabilities defined as dword aligned
	 */
	size = round_up(size, 4);

	/*
	 * Zero state is
	 * - All Readable, None Writeable, None Virtualized
	 */
	perm->virt = kzalloc(size, GFP_KERNEL);
	perm->write = kzalloc(size, GFP_KERNEL);
	if (!perm->virt || !perm->write) {
		free_perm_bits(perm);
		return -ENOMEM;
	}

	perm->readfn = vfio_default_config_read;
	perm->writefn = vfio_default_config_write;

	return 0;
}

/*
 * Helper functions for filling in permission tables
 */
static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
{
	p->virt[off] = virt;
	p->write[off] = write;
}

/* Handle endian-ness - pci and tables are little-endian */
static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
{
	*(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
	*(__le16 *)(&p->write[off]) = cpu_to_le16(write);
}

/* Handle endian-ness - pci and tables are little-endian */
static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
{
	*(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
	*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}
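
/*
 * Note the byte-order contract these helpers rely on: the virt/write
 * arrays are kept little-endian, matching vconfig and config space, so
 * the byte-wise memcpy()s in vfio_default_config_read/write() line up
 * regardless of host endianness. E.g. p_setw(perm, off, 0x0400, ...)
 * stores bytes {0x00, 0x04} even on a big-endian host.
 */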

/* Caller should hold memory_lock semaphore */
bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);

	/*
	 * Memory region cannot be accessed if device power state is D3.
	 *
	 * SR-IOV VF memory enable is handled by the MSE bit in the
	 * PF SR-IOV capability, there's therefore no need to trigger
	 * faults based on the virtual value.
	 */
	return pdev->current_state < PCI_D3hot &&
	       (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
}

/*
 * Restore the *real* BARs after we detect a FLR or backdoor reset.
 * (backdoor = some device specific technique that we didn't catch)
 */
static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 *rbar = vdev->rbar;
	u16 cmd;
	int i;

	if (pdev->is_virtfn)
		return;

	pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);

	for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
		pci_user_write_config_dword(pdev, i, *rbar);

	pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);

	if (vdev->nointx) {
		pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_INTX_DISABLE;
		pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
	}
}

static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
{
	unsigned long flags = pci_resource_flags(pdev, bar);
	u32 val;

	if (flags & IORESOURCE_IO)
		return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);

	val = PCI_BASE_ADDRESS_SPACE_MEMORY;

	if (flags & IORESOURCE_PREFETCH)
		val |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	if (flags & IORESOURCE_MEM_64)
		val |= PCI_BASE_ADDRESS_MEM_TYPE_64;

	return cpu_to_le32(val);
}
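
/*
 * For example, a 64-bit prefetchable memory BAR yields
 * PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH |
 * PCI_BASE_ADDRESS_MEM_TYPE_64 (0xc), the same read-only low bits
 * real hardware would report for that resource.
 */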

/*
 * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
 * to reflect the hardware capabilities. This implements BAR sizing.
 */
static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	__le32 *vbar;
	u64 mask;

	if (!vdev->bardirty)
		return;

	vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];

	for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
		int bar = i + PCI_STD_RESOURCES;

		if (!pci_resource_start(pdev, bar)) {
			*vbar = 0; /* Unmapped by host = unimplemented to user */
			continue;
		}

		mask = ~(pci_resource_len(pdev, bar) - 1);

		*vbar &= cpu_to_le32((u32)mask);
		*vbar |= vfio_generate_bar_flags(pdev, bar);

		if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			vbar++;
			*vbar &= cpu_to_le32((u32)(mask >> 32));
			i++;
		}
	}

	vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];

	/*
	 * NB. REGION_INFO will have reported zero size if we weren't able
	 * to read the ROM, but we still return the actual BAR size here if
	 * it exists (or the shadow ROM space).
	 */
	if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
		mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
		mask |= PCI_ROM_ADDRESS_ENABLE;
		*vbar &= cpu_to_le32((u32)mask);
	} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
		   IORESOURCE_ROM_SHADOW) {
		mask = ~(0x20000 - 1);
		mask |= PCI_ROM_ADDRESS_ENABLE;
		*vbar &= cpu_to_le32((u32)mask);
	} else
		*vbar = 0;

	vdev->bardirty = false;
}
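
/*
 * Worked sizing example: for a 1MB (0x100000) memory BAR,
 * mask = 0xfff00000. A user sizing the BAR writes 0xffffffff to the
 * virtual BAR, and the next read returns (0xffffffff & 0xfff00000)
 * with the flag bits OR'd in - exactly what real hardware would
 * return, without the write ever reaching the physical device.
 */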

static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
				  int count, struct perm_bits *perm,
				  int offset, __le32 *val)
{
	if (is_bar(offset)) /* pos == offset for basic config */
		vfio_bar_fixup(vdev);

	count = vfio_default_config_read(vdev, pos, count, perm, offset, val);

	/* Mask in virtual memory enable */
	if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
		u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
		u32 tmp_val = le32_to_cpu(*val);

		tmp_val |= cmd & PCI_COMMAND_MEMORY;
		*val = cpu_to_le32(tmp_val);
	}

	return count;
}

/* Test whether BARs match the value we think they should contain */
static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
{
	int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
	u32 bar;

	for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
		if (vdev->rbar[i]) {
			ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
			if (ret || vdev->rbar[i] != bar)
				return true;
		}
	}

	return false;
}

static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
				   int count, struct perm_bits *perm,
				   int offset, __le32 val)
{
	struct pci_dev *pdev = vdev->pdev;
	__le16 *virt_cmd;
	u16 new_cmd = 0;
	int ret;

	virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];

	if (offset == PCI_COMMAND) {
		bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
		u16 phys_cmd;

		ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
		if (ret)
			return ret;

		new_cmd = le32_to_cpu(val);

		phys_io = !!(phys_cmd & PCI_COMMAND_IO);
		virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
		new_io = !!(new_cmd & PCI_COMMAND_IO);

		phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
		virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
		new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);

		if (!new_mem)
			vfio_pci_zap_and_down_write_memory_lock(vdev);
		else
			down_write(&vdev->memory_lock);

		/*
		 * If the user is writing mem/io enable (new_mem/io) and we
		 * think it's already enabled (virt_mem/io), but the hardware
		 * shows it disabled (phys_mem/io), then the device has
		 * undergone some kind of backdoor reset and needs to be
		 * restored before we allow it to enable the bars.
		 * SR-IOV devices will trigger this - for mem enable let's
		 * catch this now and for io enable it will be caught later
		 */
		if ((new_mem && virt_mem && !phys_mem &&
		     !pdev->no_command_memory) ||
		    (new_io && virt_io && !phys_io) ||
		    vfio_need_bar_restore(vdev))
			vfio_bar_restore(vdev);
	}

	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0) {
		if (offset == PCI_COMMAND)
			up_write(&vdev->memory_lock);
		return count;
	}

	/*
	 * Save current memory/io enable bits in vconfig to allow for
	 * the test above next time.
	 */
	if (offset == PCI_COMMAND) {
		u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;

		*virt_cmd &= cpu_to_le16(~mask);
		*virt_cmd |= cpu_to_le16(new_cmd & mask);

		up_write(&vdev->memory_lock);
	}

	/* Emulate INTx disable */
	if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
		bool virt_intx_disable;

		virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
				       PCI_COMMAND_INTX_DISABLE);

		if (virt_intx_disable && !vdev->virq_disabled) {
			vdev->virq_disabled = true;
			vfio_pci_intx_mask(vdev);
		} else if (!virt_intx_disable && vdev->virq_disabled) {
			vdev->virq_disabled = false;
			vfio_pci_intx_unmask(vdev);
		}
	}

	if (is_bar(offset))
		vdev->bardirty = true;

	return count;
}

/* Permissions for the Basic PCI Header */
static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
		return -ENOMEM;

	perm->readfn = vfio_basic_config_read;
	perm->writefn = vfio_basic_config_write;

	/* Virtualized for SR-IOV functions, which just have FFFF */
	p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
	p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);

	/*
	 * Virtualize INTx disable, we use it internally for interrupt
	 * control and can emulate it for non-PCI 2.3 devices.
	 */
	p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);

	/* Virtualize capability list, we might want to skip/disable */
	p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);

	/* No harm to write */
	p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
	p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
	p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);

	/* Virtualize all bars, can't touch the real ones */
	p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);

	/* Allow us to adjust capability chain */
	p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);

	/* Sometimes used by sw, just virtualize */
	p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);

	/* Virtualize interrupt pin to allow hiding INTx */
	p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);

	return 0;
}

/*
 * It takes all the required locks to protect the access of power related
 * variables and then invokes vfio_pci_set_power_state().
 */
static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
					  pci_power_t state)
{
	if (state >= PCI_D3hot)
		vfio_pci_zap_and_down_write_memory_lock(vdev);
	else
		down_write(&vdev->memory_lock);

	vfio_pci_set_power_state(vdev, state);
	up_write(&vdev->memory_lock);
}

static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 val)
{
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;

	if (offset == PCI_PM_CTRL) {
		pci_power_t state;

		switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
		case 0:
			state = PCI_D0;
			break;
		case 1:
			state = PCI_D1;
			break;
		case 2:
			state = PCI_D2;
			break;
		case 3:
			state = PCI_D3hot;
			break;
		}

		vfio_lock_and_set_power_state(vdev, state);
	}

	return count;
}

/* Permissions for the Power Management capability */
static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
		return -ENOMEM;

	perm->writefn = vfio_pm_config_write;

	/*
	 * We always virtualize the next field so we can remove
	 * capabilities from the chain if we want to.
	 */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * Guests can't process PME events. If a PME event is generated,
	 * it will mostly be handled by the host, and the host will clear
	 * PME_STATUS. So virtualize the PME_Support bits. The vconfig
	 * bits will be cleared during device capability initialization.
	 */
	p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);

	/*
	 * Power management is defined *per function*, so we can let
	 * the user change power state, but we trap and initiate the
	 * change ourselves, so the state bits are read-only.
	 *
	 * The guest can't process PME from D3cold so virtualize PME_Status
	 * and PME_En bits. The vconfig bits will be cleared during device
	 * capability initialization.
	 */
	p_setd(perm, PCI_PM_CTRL,
	       PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
	       ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
		 PCI_PM_CTRL_STATE_MASK));

	return 0;
}

static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	struct pci_dev *pdev = vdev->pdev;
	__le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
	__le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
	u16 addr;
	u32 data;

	/*
	 * Write through to emulation. If the write includes the upper byte
	 * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we
	 * have work to do.
	 */
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
	    offset + count <= PCI_VPD_ADDR + 1)
		return count;

	addr = le16_to_cpu(*paddr);

	if (addr & PCI_VPD_ADDR_F) {
		data = le32_to_cpu(*pdata);
		if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
			return count;
	} else {
		data = 0;
		if (pci_read_vpd(pdev, addr, 4, &data) < 0)
			return count;
		*pdata = cpu_to_le32(data);
	}

	/*
	 * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
	 * signal completion. If an error occurs above, we assume that not
	 * toggling this bit will induce a driver timeout.
	 */
	addr ^= PCI_VPD_ADDR_F;
	*paddr = cpu_to_le16(addr);

	return count;
}
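
/*
 * The above implements the standard VPD access protocol. As an
 * illustration, a user reading VPD offset 0x10 through the emulation
 * would write 0x0010 (PCI_VPD_ADDR_F clear) to PCI_VPD_ADDR, poll
 * PCI_VPD_ADDR until PCI_VPD_ADDR_F is set, then read the dword from
 * PCI_VPD_DATA - all serviced via pci_read_vpd()/pci_write_vpd()
 * rather than raw config space access.
 */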

/* Permissions for Vital Product Data capability */
static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
		return -ENOMEM;

	perm->writefn = vfio_vpd_config_write;

	/*
	 * We always virtualize the next field so we can remove
	 * capabilities from the chain if we want to.
	 */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * Both the address and data registers are virtualized to
	 * enable access through the pci_vpd_read/write functions
	 */
	p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
	p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);

	return 0;
}

/* Permissions for PCI-X capability */
static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
{
	/* Alloc 24, but only 8 are used in v0 */
	if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
		return -ENOMEM;

	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
	p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
	return 0;
}

static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
				  offset + PCI_EXP_DEVCTL);
	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;

	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;

	/*
	 * The FLR bit is virtualized, if set and the device supports PCIe
	 * FLR, issue a reset_function. Regardless, clear the bit, the spec
	 * requires it to be always read as zero. NB, reset_function might
	 * not use a PCIe FLR, we don't have that level of granularity.
	 */
	if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
		u32 cap;
		int ret;

		*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);

		ret = pci_user_read_config_dword(vdev->pdev,
						 pos - offset + PCI_EXP_DEVCAP,
						 &cap);

		if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
			vfio_pci_zap_and_down_write_memory_lock(vdev);
			pci_try_reset_function(vdev->pdev);
			up_write(&vdev->memory_lock);
		}
	}

	/*
	 * MPS is virtualized to the user, writes do not change the physical
	 * register since determining a proper MPS value requires a system wide
	 * device view. The MRRS is largely independent of MPS, but since the
	 * user does not have that system-wide view, they might set a safe, but
	 * inefficiently low value. Here we allow writes through to hardware,
	 * but we set the floor to the physical device MPS setting, so that
	 * we can at least use full TLPs, as defined by the MPS value.
	 *
	 * NB, if any devices actually depend on an artificially low MRRS
	 * setting, this will need to be revisited, perhaps with a quirk
	 * through pcie_set_readrq().
	 */
	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
		readrq = 128 <<
			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
		readrq = max(readrq, pcie_get_mps(vdev->pdev));

		pcie_set_readrq(vdev->pdev, readrq);
	}

	return count;
}
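
/*
 * Example of the MRRS floor above: a user writing READRQ = 0 requests
 * 128-byte reads (128 << 0). If the physical MPS is 256 bytes,
 * max(readrq, pcie_get_mps()) raises the value programmed through
 * pcie_set_readrq() to 256, keeping full-sized TLPs usable.
 */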

/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
	/* Alloc largest of possible sizes */
	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
		return -ENOMEM;

	perm->writefn = vfio_exp_config_write;

	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * Allow writes to device control fields, except devctl_phantom,
	 * which could confuse IOMMU, MPS, which can break communication
	 * with other physical devices, and the ARI bit in devctl2, which
	 * is set at probe time. FLR and MRRS get virtualized via our
	 * writefn.
	 */
	p_setw(perm, PCI_EXP_DEVCTL,
	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
	return 0;
}

static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 val)
{
	u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;

	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;

	/*
	 * The FLR bit is virtualized, if set and the device supports AF
	 * FLR, issue a reset_function. Regardless, clear the bit, the spec
	 * requires it to be always read as zero. NB, reset_function might
	 * not use an AF FLR, we don't have that level of granularity.
	 */
	if (*ctrl & PCI_AF_CTRL_FLR) {
		u8 cap;
		int ret;

		*ctrl &= ~PCI_AF_CTRL_FLR;

		ret = pci_user_read_config_byte(vdev->pdev,
						pos - offset + PCI_AF_CAP,
						&cap);

		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
			vfio_pci_zap_and_down_write_memory_lock(vdev);
			pci_try_reset_function(vdev->pdev);
			up_write(&vdev->memory_lock);
		}
	}

	return count;
}

/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
		return -ENOMEM;

	perm->writefn = vfio_af_config_write;

	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
	p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
	return 0;
}

/* Permissions for Advanced Error Reporting extended capability */
static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
{
	u32 mask;

	if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
		return -ENOMEM;

	/*
	 * Virtualize the first dword of all express capabilities
	 * because it includes the next pointer. This lets us later
	 * remove capabilities from the chain if we need to.
	 */
	p_setd(perm, 0, ALL_VIRT, NO_WRITE);

	/* Writable bits mask */
	mask =	PCI_ERR_UNC_UND |	/* Undefined */
		PCI_ERR_UNC_DLP |	/* Data Link Protocol */
		PCI_ERR_UNC_SURPDN |	/* Surprise Down */
		PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
		PCI_ERR_UNC_FCP |	/* Flow Control Protocol */
		PCI_ERR_UNC_COMP_TIME |	/* Completion Timeout */
		PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */
		PCI_ERR_UNC_UNX_COMP |	/* Unexpected Completion */
		PCI_ERR_UNC_RX_OVER |	/* Receiver Overflow */
		PCI_ERR_UNC_MALF_TLP |	/* Malformed TLP */
		PCI_ERR_UNC_ECRC |	/* ECRC Error Status */
		PCI_ERR_UNC_UNSUP |	/* Unsupported Request */
		PCI_ERR_UNC_ACSV |	/* ACS Violation */
		PCI_ERR_UNC_INTN |	/* internal error */
		PCI_ERR_UNC_MCBTLP |	/* MC blocked TLP */
		PCI_ERR_UNC_ATOMEG |	/* Atomic egress blocked */
		PCI_ERR_UNC_TLPPRE;	/* TLP prefix blocked */
	p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);

	mask =	PCI_ERR_COR_RCVR |	/* Receiver Error Status */
		PCI_ERR_COR_BAD_TLP |	/* Bad TLP Status */
		PCI_ERR_COR_BAD_DLLP |	/* Bad DLLP Status */
		PCI_ERR_COR_REP_ROLL |	/* REPLAY_NUM Rollover */
		PCI_ERR_COR_REP_TIMER |	/* Replay Timer Timeout */
		PCI_ERR_COR_ADV_NFAT |	/* Advisory Non-Fatal */
		PCI_ERR_COR_INTERNAL |	/* Corrected Internal */
		PCI_ERR_COR_LOG_OVER;	/* Header Log Overflow */
	p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);

	mask =	PCI_ERR_CAP_ECRC_GENE |	/* ECRC Generation Enable */
		PCI_ERR_CAP_ECRC_CHKE;	/* ECRC Check Enable */
	p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
	return 0;
}

/* Permissions for Power Budgeting extended capability */
static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
		return -ENOMEM;

	p_setd(perm, 0, ALL_VIRT, NO_WRITE);

	/* Writing the data selector is OK, the info is still read-only */
	p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
	return 0;
}

/*
 * Initialize the shared permission tables
 */
void vfio_pci_uninit_perm_bits(void)
{
	free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);

	free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
	free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
	free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
	free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
	free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);

	free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
	free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
}

int __init vfio_pci_init_perm_bits(void)
{
	int ret;

	/* Basic config space */
	ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);

	/* Capabilities */
	ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
	ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
	ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
	cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
	ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
	ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);

	/* Extended capabilities */
	ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
	ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
	ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
	ecap_perms[PCI_EXT_CAP_ID_DVSEC].writefn = vfio_raw_config_write;

	if (ret)
		vfio_pci_uninit_perm_bits();

	return ret;
}

static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
{
	u8 cap;
	int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
						 PCI_STD_HEADER_SIZEOF;
	cap = vdev->pci_config_map[pos];

	if (cap == PCI_CAP_ID_BASIC)
		return 0;

	/* XXX Can we have two abutting capabilities of the same type? */
	while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
		pos--;

	return pos;
}
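
/*
 * For example, with an MSI capability occupying config bytes
 * 0x50-0x5f, pci_config_map[0x50..0x5f] all hold PCI_CAP_ID_MSI, so
 * an access landing at pos 0x54 walks backwards and returns 0x50,
 * the start of the capability.
 */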

static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 *val)
{
	/* Update max available queue size from msi_qmax */
	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
		__le16 *flags;
		int start;

		start = vfio_find_cap_start(vdev, pos);

		flags = (__le16 *)&vdev->vconfig[start];

		*flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
		*flags |= cpu_to_le16(vdev->msi_qmax << 1);
	}

	return vfio_default_config_read(vdev, pos, count, perm, offset, val);
}

static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;

	/* Fixup and write configured queue size and enable to hardware */
	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
		__le16 *pflags;
		u16 flags;
		int start, ret;

		start = vfio_find_cap_start(vdev, pos);

		pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];

		flags = le16_to_cpu(*pflags);

		/* MSI is enabled via ioctl */
		if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
			flags &= ~PCI_MSI_FLAGS_ENABLE;

		/* Check queue size */
		if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
			flags &= ~PCI_MSI_FLAGS_QSIZE;
			flags |= vdev->msi_qmax << 4;
		}

		/* Write back to virt and to hardware */
		*pflags = cpu_to_le16(flags);
		ret = pci_user_write_config_word(vdev->pdev,
						 start + PCI_MSI_FLAGS,
						 flags);
		if (ret)
			return ret;
	}

	return count;
}

/*
 * MSI determination is per-device, so this routine gets used beyond
 * initialization time. Don't add __init
 */
static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
{
	if (alloc_perm_bits(perm, len))
		return -ENOMEM;

	perm->readfn = vfio_msi_config_read;
	perm->writefn = vfio_msi_config_write;

	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * The upper byte of the control register is reserved,
	 * just setup the lower byte.
	 */
	p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
	p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
	if (flags & PCI_MSI_FLAGS_64BIT) {
		p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
		p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
		if (flags & PCI_MSI_FLAGS_MASKBIT) {
			p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
			p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
		}
	} else {
		p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
		if (flags & PCI_MSI_FLAGS_MASKBIT) {
			p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
			p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
		}
	}
	return 0;
}

/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	int len, ret;
	u16 flags;

	ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
	if (ret)
		return pcibios_err_to_errno(ret);

	len = 10; /* Minimum size */
	if (flags & PCI_MSI_FLAGS_64BIT)
		len += 4;
	if (flags & PCI_MSI_FLAGS_MASKBIT)
		len += 10;

	if (vdev->msi_perm)
		return len;

	vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL_ACCOUNT);
	if (!vdev->msi_perm)
		return -ENOMEM;

	ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
	if (ret) {
		kfree(vdev->msi_perm);
		return ret;
	}

	return len;
}
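
/*
 * For example, a 64-bit MSI capability with per-vector masking
 * (PCI_MSI_FLAGS_64BIT | PCI_MSI_FLAGS_MASKBIT) gives
 * len = 10 + 4 + 10 = 24 bytes: the base registers, the upper address
 * dword, and the mask and pending dwords.
 */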

/* Determine extended capability length for VC (2 & 9) and MFVC */
static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 tmp;
	int ret, evcc, phases, vc_arb;
	int len = PCI_CAP_VC_BASE_SIZEOF;

	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
	if (ret)
		return pcibios_err_to_errno(ret);

	evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
	if (ret)
		return pcibios_err_to_errno(ret);

	if (tmp & PCI_VC_CAP2_128_PHASE)
		phases = 128;
	else if (tmp & PCI_VC_CAP2_64_PHASE)
		phases = 64;
	else if (tmp & PCI_VC_CAP2_32_PHASE)
		phases = 32;
	else
		phases = 0;

	vc_arb = phases * 4;

	/*
	 * Port arbitration tables are root & switch only;
	 * function arbitration tables are function 0 only.
	 * In either case, we'll never let user write them so
	 * we don't care how big they are
	 */
	len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
	if (vc_arb) {
		len = round_up(len, 16);
		len += vc_arb / 8;
	}
	return len;
}
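
/*
 * Worked example: evcc = 1 (two VC resources) with 32-phase VC
 * arbitration gives vc_arb = 128 bits, so len is
 * PCI_CAP_VC_BASE_SIZEOF + 2 * PCI_CAP_VC_PER_VC_SIZEOF, rounded up
 * to a 16-byte boundary, plus 128 / 8 = 16 bytes of arbitration table.
 */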

static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 dword;
	u16 word;
	u8 byte;
	int ret;

	switch (cap) {
	case PCI_CAP_ID_MSI:
		return vfio_msi_cap_len(vdev, pos);
	case PCI_CAP_ID_PCIX:
		ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
		if (ret)
			return pcibios_err_to_errno(ret);

		if (PCI_X_CMD_VERSION(word)) {
			if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
				/* Test for extended capabilities */
				pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
						      &dword);
				vdev->extended_caps = (dword != 0);
			}
			return PCI_CAP_PCIX_SIZEOF_V2;
		} else
			return PCI_CAP_PCIX_SIZEOF_V0;
	case PCI_CAP_ID_VNDR:
		/* length follows next field */
		ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		return byte;
	case PCI_CAP_ID_EXP:
		if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
			/* Test for extended capabilities */
			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
			vdev->extended_caps = (dword != 0);
		}

		/* length based on version and type */
		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
				return 0xc; /* "All Devices" only, no link */
			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
		} else {
			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
				return 0x2c; /* No link */
			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
		}
	case PCI_CAP_ID_HT:
		ret = pci_read_config_byte(pdev, pos + 3, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		return (byte & HT_3BIT_CAP_MASK) ?
			HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
	case PCI_CAP_ID_SATA:
		ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		byte &= PCI_SATA_REGS_MASK;
		if (byte == PCI_SATA_REGS_INLINE)
			return PCI_SATA_SIZEOF_LONG;
		else
			return PCI_SATA_SIZEOF_SHORT;
	default:
		pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
			 __func__, cap, pos);
	}

	return 0;
}

static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
{
	struct pci_dev *pdev = vdev->pdev;
	u8 byte;
	u32 dword;
	int ret;

	switch (ecap) {
	case PCI_EXT_CAP_ID_VNDR:
		ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);

		return dword >> PCI_VSEC_HDR_LEN_SHIFT;
	case PCI_EXT_CAP_ID_VC:
	case PCI_EXT_CAP_ID_VC9:
	case PCI_EXT_CAP_ID_MFVC:
		return vfio_vc_cap_len(vdev, epos);
	case PCI_EXT_CAP_ID_ACS:
		ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		if (byte & PCI_ACS_EC) {
			int bits;

			ret = pci_read_config_byte(pdev,
						   epos + PCI_ACS_EGRESS_BITS,
						   &byte);
			if (ret)
				return pcibios_err_to_errno(ret);

			bits = byte ? round_up(byte, 32) : 256;
			return 8 + (bits / 8);
		}
		return 8;

	case PCI_EXT_CAP_ID_REBAR:
		ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		byte &= PCI_REBAR_CTRL_NBAR_MASK;
		byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;

		return 4 + (byte * 8);
	case PCI_EXT_CAP_ID_DPA:
		ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);

		byte &= PCI_DPA_CAP_SUBSTATE_MASK;
		return PCI_DPA_BASE_SIZEOF + byte + 1;
	case PCI_EXT_CAP_ID_TPH:
		ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);

		if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
			int sts;

			sts = dword & PCI_TPH_CAP_ST_MASK;
			sts >>= PCI_TPH_CAP_ST_SHIFT;
			return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
		}
		return PCI_TPH_BASE_SIZEOF;
	case PCI_EXT_CAP_ID_DVSEC:
		ret = pci_read_config_dword(pdev, epos + PCI_DVSEC_HEADER1, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);
		return PCI_DVSEC_HEADER1_LEN(dword);
	default:
		pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
			 __func__, ecap, epos);
	}

	return 0;
}

static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
					 int offset)
{
	__le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
	__le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];

	/* Clear vconfig PME_Support, PME_Status, and PME_En bits */
	*pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
	*ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
}

static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
				   int offset, int size)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret = 0;

	/*
	 * We try to read physical config space in the largest chunks
	 * we can, assuming that all of the fields support dword access.
	 * pci_save_state() makes this same assumption and seems to do ok.
	 */
	while (size) {
		int filled;

		if (size >= 4 && !(offset % 4)) {
			__le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
			u32 dword;

			ret = pci_read_config_dword(pdev, offset, &dword);
			if (ret)
				return ret;
			*dwordp = cpu_to_le32(dword);
			filled = 4;
		} else if (size >= 2 && !(offset % 2)) {
			__le16 *wordp = (__le16 *)&vdev->vconfig[offset];
			u16 word;

			ret = pci_read_config_word(pdev, offset, &word);
			if (ret)
				return ret;
			*wordp = cpu_to_le16(word);
			filled = 2;
		} else {
			u8 *byte = &vdev->vconfig[offset];
			ret = pci_read_config_byte(pdev, offset, byte);
			if (ret)
				return ret;
			filled = 1;
		}

		offset += filled;
		size -= filled;
	}

	return ret;
}
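
/*
 * Example: filling 7 bytes starting at offset 0x3d proceeds as a byte
 * at 0x3d, a word at 0x3e, then a dword at 0x40 - each pass uses the
 * largest naturally aligned access that fits, mirroring
 * pci_save_state().
 */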
1515 | |
1516 | static int vfio_cap_init(struct vfio_pci_core_device *vdev) |
1517 | { |
1518 | struct pci_dev *pdev = vdev->pdev; |
1519 | u8 *map = vdev->pci_config_map; |
1520 | u16 status; |
1521 | u8 pos, *prev, cap; |
1522 | int loops, ret, caps = 0; |
1523 | |
1524 | /* Any capabilities? */ |
1525 | ret = pci_read_config_word(dev: pdev, PCI_STATUS, val: &status); |
1526 | if (ret) |
1527 | return ret; |
1528 | |
1529 | if (!(status & PCI_STATUS_CAP_LIST)) |
1530 | return 0; /* Done */ |
1531 | |
1532 | ret = pci_read_config_byte(dev: pdev, PCI_CAPABILITY_LIST, val: &pos); |
1533 | if (ret) |
1534 | return ret; |
1535 | |
1536 | /* Mark the previous position in case we want to skip a capability */ |
1537 | prev = &vdev->vconfig[PCI_CAPABILITY_LIST]; |
1538 | |
1539 | /* We can bound our loop, capabilities are dword aligned */ |
1540 | loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF; |
1541 | while (pos && loops--) { |
1542 | u8 next; |
1543 | int i, len = 0; |
1544 | |
1545 | ret = pci_read_config_byte(dev: pdev, where: pos, val: &cap); |
1546 | if (ret) |
1547 | return ret; |
1548 | |
1549 | ret = pci_read_config_byte(dev: pdev, |
1550 | where: pos + PCI_CAP_LIST_NEXT, val: &next); |
1551 | if (ret) |
1552 | return ret; |
1553 | |
1554 | /* |
1555 | * ID 0 is a NULL capability, conflicting with our fake |
1556 | * PCI_CAP_ID_BASIC. As it has no content, consider it |
1557 | * hidden for now. |
1558 | */ |
1559 | if (cap && cap <= PCI_CAP_ID_MAX) { |
1560 | len = pci_cap_length[cap]; |
1561 | if (len == 0xFF) { /* Variable length */ |
1562 | len = vfio_cap_len(vdev, cap, pos); |
1563 | if (len < 0) |
1564 | return len; |
1565 | } |
1566 | } |
1567 | |
1568 | if (!len) { |
1569 | pci_dbg(pdev, "%s: hiding cap %#x@%#x\n" , __func__, |
1570 | cap, pos); |
1571 | *prev = next; |
1572 | pos = next; |
1573 | continue; |
1574 | } |
1575 | |
1576 | /* Sanity check, do we overlap other capabilities? */ |
1577 | for (i = 0; i < len; i++) { |
1578 | if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) |
1579 | continue; |
1580 | |
1581 | pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n" , |
1582 | __func__, pos + i, map[pos + i], cap); |
1583 | } |
1584 | |
1585 | BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); |
1586 | |
1587 | memset(map + pos, cap, len); |
1588 | ret = vfio_fill_vconfig_bytes(vdev, offset: pos, size: len); |
1589 | if (ret) |
1590 | return ret; |
1591 | |
1592 | if (cap == PCI_CAP_ID_PM) |
			vfio_update_pm_vconfig_bytes(vdev, pos);
1594 | |
1595 | prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT]; |
1596 | pos = next; |
1597 | caps++; |
1598 | } |
1599 | |
1600 | /* If we didn't fill any capabilities, clear the status flag */ |
1601 | if (!caps) { |
1602 | __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS]; |
1603 | *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST); |
1604 | } |
1605 | |
1606 | return 0; |
1607 | } |
1608 | |
1609 | static int vfio_ecap_init(struct vfio_pci_core_device *vdev) |
1610 | { |
1611 | struct pci_dev *pdev = vdev->pdev; |
1612 | u8 *map = vdev->pci_config_map; |
1613 | u16 epos; |
1614 | __le32 *prev = NULL; |
1615 | int loops, ret, ecaps = 0; |
1616 | |
1617 | if (!vdev->extended_caps) |
1618 | return 0; |
1619 | |
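	/* Extended capabilities begin right after standard config space */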
1620 | epos = PCI_CFG_SPACE_SIZE; |
1621 | |
1622 | loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF; |
1623 | |
1624 | while (loops-- && epos >= PCI_CFG_SPACE_SIZE) { |
		u32 header;
1626 | u16 ecap; |
1627 | int i, len = 0; |
1628 | bool hidden = false; |
1629 | |
		ret = pci_read_config_dword(pdev, epos, &header);
1631 | if (ret) |
1632 | return ret; |
1633 | |
1634 | ecap = PCI_EXT_CAP_ID(header); |
1635 | |
1636 | if (ecap <= PCI_EXT_CAP_ID_MAX) { |
1637 | len = pci_ext_cap_length[ecap]; |
1638 | if (len == 0xFF) { |
1639 | len = vfio_ext_cap_len(vdev, ecap, epos); |
1640 | if (len < 0) |
1641 | return len; |
1642 | } |
1643 | } |
1644 | |
1645 | if (!len) { |
1646 | pci_dbg(pdev, "%s: hiding ecap %#x@%#x\n" , |
1647 | __func__, ecap, epos); |
1648 | |
1649 | /* If not the first in the chain, we can skip over it */ |
1650 | if (prev) { |
1651 | u32 val = epos = PCI_EXT_CAP_NEXT(header); |
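				/*
				 * The next-cap offset occupies bits 31:20 of
				 * the header; ecaps are dword aligned, hence
				 * the 0xffc mask. Rewrite the previous header
				 * to splice this ecap out of the chain.
				 */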
1652 | *prev &= cpu_to_le32(~(0xffcU << 20)); |
1653 | *prev |= cpu_to_le32(val << 20); |
1654 | continue; |
1655 | } |
1656 | |
1657 | /* |
1658 | * Otherwise, fill in a placeholder, the direct |
1659 | * readfn will virtualize this automatically |
1660 | */ |
1661 | len = PCI_CAP_SIZEOF; |
1662 | hidden = true; |
1663 | } |
1664 | |
1665 | for (i = 0; i < len; i++) { |
1666 | if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) |
1667 | continue; |
1668 | |
			pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
1670 | __func__, epos + i, map[epos + i], ecap); |
1671 | } |
1672 | |
1673 | /* |
1674 | * Even though ecap is 2 bytes, we're currently a long way |
1675 | * from exceeding 1 byte capabilities. If we ever make it |
1676 | * up to 0xFE we'll need to up this to a two-byte, byte map. |
1677 | */ |
1678 | BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); |
1679 | |
1680 | memset(map + epos, ecap, len); |
		ret = vfio_fill_vconfig_bytes(vdev, epos, len);
1682 | if (ret) |
1683 | return ret; |
1684 | |
1685 | /* |
1686 | * If we're just using this capability to anchor the list, |
1687 | * hide the real ID. Only count real ecaps. XXX PCI spec |
1688 | * indicates to use cap id = 0, version = 0, next = 0 if |
1689 | * ecaps are absent, hope users check all the way to next. |
1690 | */ |
1691 | if (hidden) |
1692 | *(__le32 *)&vdev->vconfig[epos] &= |
1693 | cpu_to_le32((0xffcU << 20)); |
1694 | else |
1695 | ecaps++; |
1696 | |
1697 | prev = (__le32 *)&vdev->vconfig[epos]; |
1698 | epos = PCI_EXT_CAP_NEXT(header); |
1699 | } |
1700 | |
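	/* No real ecaps: zero the anchor so it reads ID 0, version 0, next 0 */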
1701 | if (!ecaps) |
1702 | *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0; |
1703 | |
1704 | return 0; |
1705 | } |
1706 | |
1707 | /* |
1708 | * Nag about hardware bugs, hopefully to have vendors fix them, but at least |
1709 | * to collect a list of dependencies for the VF INTx pin quirk below. |
1710 | */ |
1711 | static const struct pci_device_id known_bogus_vf_intx_pin[] = { |
1712 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, |
1713 | {} |
1714 | }; |
1715 | |
1716 | /* |
1717 | * For each device we allocate a pci_config_map that indicates the |
1718 | * capability occupying each dword and thus the struct perm_bits we |
1719 | * use for read and write. We also allocate a virtualized config |
1720 | * space which tracks reads and writes to bits that we emulate for |
1721 | * the user. Initial values filled from device. |
1722 | * |
1723 | * Using shared struct perm_bits between all vfio-pci devices saves |
1724 | * us from allocating cfg_size buffers for virt and write for every |
1725 | * device. We could remove vconfig and allocate individual buffers |
1726 | * for each area requiring emulated bits, but the array of pointers |
1727 | * would be comparable in size (at least for standard config space). |
1728 | */ |
1729 | int vfio_config_init(struct vfio_pci_core_device *vdev) |
1730 | { |
1731 | struct pci_dev *pdev = vdev->pdev; |
1732 | u8 *map, *vconfig; |
1733 | int ret; |
1734 | |
1735 | /* |
1736 | * Config space, caps and ecaps are all dword aligned, so we could |
1737 | * use one byte per dword to record the type. However, there are |
1738 | * no requirements on the length of a capability, so the gap between |
1739 | * capabilities needs byte granularity. |
1740 | */ |
	map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
1742 | if (!map) |
1743 | return -ENOMEM; |
1744 | |
	vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
	if (!vconfig) {
		kfree(map);
1748 | return -ENOMEM; |
1749 | } |
1750 | |
1751 | vdev->pci_config_map = map; |
1752 | vdev->vconfig = vconfig; |
1753 | |
1754 | memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF); |
1755 | memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID, |
1756 | pdev->cfg_size - PCI_STD_HEADER_SIZEOF); |
1757 | |
	ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
1759 | if (ret) |
1760 | goto out; |
1761 | |
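	/* BARs are virtualized lazily; dirty forces a fixup on first read */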
1762 | vdev->bardirty = true; |
1763 | |
1764 | /* |
1765 | * XXX can we just pci_load_saved_state/pci_restore_state? |
1766 | * may need to rebuild vconfig after that |
1767 | */ |
1768 | |
1769 | /* For restore after reset */ |
1770 | vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]); |
1771 | vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]); |
1772 | vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]); |
1773 | vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]); |
1774 | vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]); |
1775 | vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]); |
1776 | vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]); |
1777 | |
1778 | if (pdev->is_virtfn) { |
1779 | *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); |
1780 | *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); |
1781 | |
1782 | /* |
1783 | * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register |
1784 | * does not apply to VFs and VFs must implement this register |
1785 | * as read-only with value zero. Userspace is not readily able |
1786 | * to identify whether a device is a VF and thus that the pin |
1787 | * definition on the device is bogus should it violate this |
1788 | * requirement. We already virtualize the pin register for |
1789 | * other purposes, so we simply need to replace the bogus value |
1790 | * and consider VFs when we determine INTx IRQ count. |
1791 | */ |
1792 | if (vconfig[PCI_INTERRUPT_PIN] && |
		    !pci_match_id(known_bogus_vf_intx_pin, pdev))
			pci_warn(pdev,
				 "Hardware bug: VF reports bogus INTx pin %d\n",
1796 | vconfig[PCI_INTERRUPT_PIN]); |
1797 | |
1798 | vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ |
1799 | } |
1800 | if (pdev->no_command_memory) { |
1801 | /* |
1802 | * VFs and devices that set pdev->no_command_memory do not |
1803 | * implement the memory enable bit of the COMMAND register |
1804 | * therefore we'll not have it set in our initial copy of |
1805 | * config space after pci_enable_device(). For consistency |
1806 | * with PFs, set the virtual enable bit here. |
1807 | */ |
1808 | *(__le16 *)&vconfig[PCI_COMMAND] |= |
1809 | cpu_to_le16(PCI_COMMAND_MEMORY); |
1810 | } |
1811 | |
1812 | if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) |
1813 | vconfig[PCI_INTERRUPT_PIN] = 0; |
1814 | |
1815 | ret = vfio_cap_init(vdev); |
1816 | if (ret) |
1817 | goto out; |
1818 | |
1819 | ret = vfio_ecap_init(vdev); |
1820 | if (ret) |
1821 | goto out; |
1822 | |
1823 | return 0; |
1824 | |
1825 | out: |
	kfree(map);
	vdev->pci_config_map = NULL;
	kfree(vconfig);
	vdev->vconfig = NULL;
	return pcibios_err_to_errno(ret);
1831 | } |
1832 | |
1833 | void vfio_config_free(struct vfio_pci_core_device *vdev) |
1834 | { |
	kfree(vdev->vconfig);
	vdev->vconfig = NULL;
	kfree(vdev->pci_config_map);
	vdev->pci_config_map = NULL;
	if (vdev->msi_perm) {
		free_perm_bits(vdev->msi_perm);
		kfree(vdev->msi_perm);
1842 | vdev->msi_perm = NULL; |
1843 | } |
1844 | } |
1845 | |
1846 | /* |
1847 | * Find the remaining number of bytes in a dword that match the given |
1848 | * position. Stop at either the end of the capability or the dword boundary. |
1849 | */ |
1850 | static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev, |
1851 | loff_t pos) |
1852 | { |
1853 | u8 cap = vdev->pci_config_map[pos]; |
1854 | size_t i; |
1855 | |
1856 | for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++) |
1857 | /* nop */; |
1858 | |
1859 | return i; |
1860 | } |
1861 | |
1862 | static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf, |
1863 | size_t count, loff_t *ppos, bool iswrite) |
1864 | { |
1865 | struct pci_dev *pdev = vdev->pdev; |
1866 | struct perm_bits *perm; |
1867 | __le32 val = 0; |
1868 | int cap_start = 0, offset; |
1869 | u8 cap_id; |
1870 | ssize_t ret; |
1871 | |
1872 | if (*ppos < 0 || *ppos >= pdev->cfg_size || |
1873 | *ppos + count > pdev->cfg_size) |
1874 | return -EFAULT; |
1875 | |
1876 | /* |
1877 | * Chop accesses into aligned chunks containing no more than a |
1878 | * single capability. Caller increments to the next chunk. |
1879 | */ |
1880 | count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos)); |
1881 | if (count >= 4 && !(*ppos % 4)) |
1882 | count = 4; |
1883 | else if (count >= 2 && !(*ppos % 2)) |
1884 | count = 2; |
1885 | else |
1886 | count = 1; |
1887 | |
1888 | ret = count; |
1889 | |
1890 | cap_id = vdev->pci_config_map[*ppos]; |
1891 | |
1892 | if (cap_id == PCI_CAP_ID_INVALID) { |
1893 | perm = &unassigned_perms; |
1894 | cap_start = *ppos; |
1895 | } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) { |
1896 | perm = &virt_perms; |
1897 | cap_start = *ppos; |
1898 | } else { |
1899 | if (*ppos >= PCI_CFG_SPACE_SIZE) { |
1900 | WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); |
1901 | |
1902 | perm = &ecap_perms[cap_id]; |
			cap_start = vfio_find_cap_start(vdev, *ppos);
1904 | } else { |
1905 | WARN_ON(cap_id > PCI_CAP_ID_MAX); |
1906 | |
1907 | perm = &cap_perms[cap_id]; |
1908 | |
1909 | if (cap_id == PCI_CAP_ID_MSI) |
1910 | perm = vdev->msi_perm; |
1911 | |
1912 | if (cap_id > PCI_CAP_ID_BASIC) |
				cap_start = vfio_find_cap_start(vdev, *ppos);
1914 | } |
1915 | } |
1916 | |
1917 | WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC); |
1918 | WARN_ON(cap_start > *ppos); |
1919 | |
1920 | offset = *ppos - cap_start; |
1921 | |
1922 | if (iswrite) { |
1923 | if (!perm->writefn) |
1924 | return ret; |
1925 | |
		if (copy_from_user(&val, buf, count))
1927 | return -EFAULT; |
1928 | |
1929 | ret = perm->writefn(vdev, *ppos, count, perm, offset, val); |
1930 | } else { |
1931 | if (perm->readfn) { |
1932 | ret = perm->readfn(vdev, *ppos, count, |
1933 | perm, offset, &val); |
1934 | if (ret < 0) |
1935 | return ret; |
1936 | } |
1937 | |
		if (copy_to_user(buf, &val, count))
1939 | return -EFAULT; |
1940 | } |
1941 | |
1942 | return ret; |
1943 | } |
1944 | |
1945 | ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf, |
1946 | size_t count, loff_t *ppos, bool iswrite) |
1947 | { |
1948 | size_t done = 0; |
1949 | int ret = 0; |
1950 | loff_t pos = *ppos; |
1951 | |
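	/* Strip the region index encoded in the high bits of the offset */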
1952 | pos &= VFIO_PCI_OFFSET_MASK; |
1953 | |
1954 | while (count) { |
		ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
1956 | if (ret < 0) |
1957 | return ret; |
1958 | |
1959 | count -= ret; |
1960 | done += ret; |
1961 | buf += ret; |
1962 | pos += ret; |
1963 | } |
1964 | |
1965 | *ppos += done; |
1966 | |
1967 | return done; |
1968 | } |
1969 | |