// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

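/*
 * Request @nvectors MSI-X vectors and register the handler for the
 * configuration-change vector. With !per_vq_vectors, one extra shared
 * vector is also set up for all virtqueue interrupts.
 */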
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

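/*
 * Allocate the tracking state for one virtqueue, create it through the
 * version-specific setup_vq() and, if it has a callback, link it into
 * the list scanned by vp_vring_interrupt().
 */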
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

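/* Tear down one virtqueue and free its tracking state. */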
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	/*
	 * If the virtqueue was left in the reset state (e.g. re-enabling it
	 * failed), info->node was never rejoined to the virtqueues list, so
	 * don't remove it here. This prevents unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

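/*
 * Find virtqueues using MSI-X: either one vector per virtqueue plus one
 * for config changes, or (with !per_vq_vectors) just two vectors, one
 * for config changes and one shared by all virtqueues.
 */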
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], bool per_vq_vectors,
			    const bool *ctx,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (names[i] && callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

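/* Last resort: find virtqueues using the shared legacy INTx interrupt. */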
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not, give up. */
	if (!(to_vp_device(vdev)->pci_dev->irq))
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}

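/* the config->bus_name() implementation */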
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

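/* the config->get_vq_affinity() implementation */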
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
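/* Freeze the virtio device, then disable the PCI device on suspend. */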
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

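/* Re-enable the PCI device and restore the virtio device on resume. */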
static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

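/*
 * Bind to a virtio PCI device: prefer the modern (virtio 1.x) interface
 * and fall back to the legacy one, or the other way round when
 * force_legacy is set.
 */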
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

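/* Unbind: unregister the virtio device and release the PCI resources. */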
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

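/*
 * Enable num_vfs SR-IOV virtual functions, or disable them all when
 * num_vfs == 0. Only allowed while the device is live (DRIVER_OK) and
 * VIRTIO_F_SR_IOV has been negotiated.
 */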
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");