// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook into the pcibios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *         Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *         Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <xen/pci.h>
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>

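/*
 * PV DomU INTx setup: the Xen PCI backend stores the PIRQ in the device's
 * interrupt-line config register, so the value read back can be bound
 * directly to an event-channel IRQ.
 */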
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
	int rc;
	int share = 1;
	int pirq;
	u8 gsi;

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
			 rc);
		return rc;
	}
	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
	pirq = gsi;

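	/* Legacy (ISA) IRQs are edge-triggered and must not be shared. */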
	if (gsi < nr_legacy_irqs())
		share = 0;

	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
			 gsi, pirq, rc);
		return rc;
	}

	dev->irq = rc;
	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
	return 0;
}

#ifdef CONFIG_ACPI
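/*
 * Map a GSI to a PIRQ via PHYSDEVOP_map_pirq and bind the result to a
 * Linux IRQ. With set_pirq the PIRQ number is forced to equal the GSI,
 * as done for the initial domain.
 */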
static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
	int rc, pirq = -1, irq;
	struct physdev_map_pirq map_irq;
	int shareable = 0;
	char *name;

	irq = xen_irq_from_gsi(gsi);
	if (irq > 0)
		return irq;

	if (set_pirq)
		pirq = gsi;

	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_GSI;
	map_irq.index = gsi;
	map_irq.pirq = pirq;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);
		return -1;
	}

	if (triggering == ACPI_EDGE_SENSITIVE) {
		shareable = 0;
		name = "ioapic-edge";
	} else {
		shareable = 1;
		name = "ioapic-level";
	}

	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
	if (irq < 0)
		goto out;

	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
	return irq;
}

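/* __acpi_register_gsi hook for HVM guests: no GSI-to-PIRQ identity mapping. */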
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	if (!xen_hvm_domain())
		return -1;

	return xen_register_pirq(gsi, trigger,
				 false /* no mapping of GSI to PIRQ */);
}

#ifdef CONFIG_XEN_PV_DOM0
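/*
 * Initial-domain GSI registration: map the GSI to a PIRQ, then tell the
 * hypervisor the triggering/polarity via PHYSDEVOP_setup_gsi.
 */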
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
	       gsi, triggering, polarity);

	irq = xen_register_pirq(gsi, triggering, true);

	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST) {
		printk(KERN_INFO "GSI %d already set up\n", gsi);
	} else if (rc) {
		printk(KERN_ERR "Failed to set up GSI %d, err_code: %d\n",
		       gsi, rc);
	}

	return irq;
}

static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
	return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif

#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>

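/*
 * MSI/MSI-X operations provided by the Xen PCI frontend driver
 * (drivers/pci/xen-pcifront.c); NULL until that driver loads.
 */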
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);

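/* Per-mode MSI setup/teardown hooks, selected at boot in xen_setup_pci_msi(). */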
struct xen_msi_ops {
	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
	void (*teardown_msi_irqs)(struct pci_dev *dev);
};

static struct xen_msi_ops xen_msi_ops __ro_after_init;

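/*
 * PV DomU path: ask the Xen PCI frontend to enable MSI/MSI-X on the
 * backend, then bind each returned PIRQ to a Linux IRQ. Returning 1
 * (rather than a negative errno) tells the caller to retry a multi-MSI
 * request with a single vector.
 */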
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;
	i = 0;
	msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" :
					       "pcifront-msi",
					       DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return msi_device_populate_sysfs(&dev->dev);

error:
	if (ret == -ENOSYS)
		dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
	else if (ret)
		dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
	kfree(v);
	return ret;
}

static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
				struct msi_msg *msg)
{
	/*
	 * We set vector == 0 to tell the hypervisor we don't care about
	 * it, but we want a pirq setup instead. We use the dest_id fields
	 * to pass the pirq that we want.
	 */
	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_hi.destid_8_31 = pirq >> 8;
	msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}

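/*
 * HVM path: allocate a PIRQ per descriptor, encode it in the MSI message
 * (see xen_msi_compose_msg() above), then bind the PIRQ to a Linux IRQ.
 */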
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, pirq;
	struct msi_desc *msidesc;
	struct msi_msg msg;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		pirq = xen_allocate_pirq_msi(dev, msidesc);
		if (pirq < 0) {
			irq = -ENODEV;
			goto error;
		}
		xen_msi_compose_msg(dev, pirq, &msg);
		__pci_write_msi_msg(msidesc, &msg);
		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       DOMID_SELF);
		if (irq < 0)
			goto error;
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return msi_device_populate_sysfs(&dev->dev);

error:
	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
		type == PCI_CAP_ID_MSI ? "" : "-X", irq);
	return irq;
}

#ifdef CONFIG_XEN_PV_DOM0
static bool __read_mostly pci_seg_supported = true;

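/*
 * Initial-domain path: map the MSI/MSI-X vectors through
 * PHYSDEVOP_map_pirq on behalf of the domain that owns the device.
 * pci_seg_supported tracks whether the hypervisor understands the
 * segment-aware MAP_PIRQ_TYPE_MSI_SEG; if not, fall back to the old
 * MAP_PIRQ_TYPE_MSI, which only works for PCI segment 0.
 */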
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int ret = 0;
	struct msi_desc *msidesc;

	msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		struct physdev_map_pirq map_irq;
		domid_t domid;

		domid = ret = xen_find_device_domain_owner(dev);
		/*
		 * N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
		 * hence check ret value for < 0.
		 */
		if (ret < 0)
			domid = DOMID_SELF;

		memset(&map_irq, 0, sizeof(map_irq));
		map_irq.domid = domid;
		map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
		map_irq.index = -1;
		map_irq.pirq = -1;
		map_irq.bus = dev->bus->number |
			      (pci_domain_nr(dev->bus) << 16);
		map_irq.devfn = dev->devfn;

		if (type == PCI_CAP_ID_MSI && nvec > 1) {
			map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
			map_irq.entry_nr = nvec;
		} else if (type == PCI_CAP_ID_MSIX) {
			int pos;
			unsigned long flags;
			u32 table_offset, bir;

			pos = dev->msix_cap;
			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
					      &table_offset);
			bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
			flags = pci_resource_flags(dev, bir);
			if (!flags || (flags & IORESOURCE_UNSET))
				return -EINVAL;

			map_irq.table_base = pci_resource_start(dev, bir);
			map_irq.entry_nr = msidesc->msi_index;
		}

		ret = -EINVAL;
		if (pci_seg_supported)
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
		if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
			/*
			 * If MAP_PIRQ_TYPE_MULTI_MSI is not available
			 * there's nothing else we can do in this case.
			 * Just set ret > 0 so driver can retry with
			 * single MSI.
			 */
			ret = 1;
			goto out;
		}
		if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
			map_irq.type = MAP_PIRQ_TYPE_MSI;
			map_irq.index = -1;
			map_irq.pirq = -1;
			map_irq.bus = dev->bus->number;
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
			if (ret != -EINVAL)
				pci_seg_supported = false;
		}
		if (ret) {
			dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
				 ret, domid);
			goto out;
		}

		ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
					       domid);
		if (ret < 0)
			goto out;
	}
	ret = msi_device_populate_sysfs(&dev->dev);
out:
	return ret;
}

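/*
 * On Dom0 resume, have the hypervisor restore the device's MSI state
 * (PHYSDEVOP_restore_msi_ext, falling back to the non-segment-aware
 * PHYSDEVOP_restore_msi). Returns true when the caller must restore
 * the MSI message itself, i.e. when we are not the initial domain.
 */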
bool xen_initdom_restore_msi(struct pci_dev *dev)
{
	int ret = 0;

	if (!xen_initial_domain())
		return true;

	if (pci_seg_supported) {
		struct physdev_pci_device restore_ext;

		restore_ext.seg = pci_domain_nr(dev->bus);
		restore_ext.bus = dev->bus->number;
		restore_ext.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
					    &restore_ext);
		if (ret == -ENOSYS)
			pci_seg_supported = false;
		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
	}
	if (!pci_seg_supported) {
		struct physdev_restore_msi restore;

		restore.bus = dev->bus->number;
		restore.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
	}
	return false;
}
#else /* !CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs	NULL
#endif /* CONFIG_XEN_PV_DOM0 */

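/*
 * Common teardown: destroy every IRQ bound to the device's MSI
 * descriptors (one per vector) and remove the sysfs entries.
 */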
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *msidesc;
	int i;

	msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_ASSOCIATED) {
		for (i = 0; i < msidesc->nvec_used; i++)
			xen_destroy_irq(msidesc->irq + i);
		msidesc->irq = 0;
	}

	msi_device_destroy_sysfs(&dev->dev);
}

static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
{
	if (dev->msix_enabled)
		xen_pci_frontend_disable_msix(dev);
	else
		xen_pci_frontend_disable_msi(dev);

	xen_teardown_msi_irqs(dev);
}

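/* irq_domain glue: dispatch to the per-mode ops picked in xen_setup_pci_msi(). */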
static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
				     struct device *dev, int nvec)
{
	int type;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return -EINVAL;

	type = to_pci_dev(dev)->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;

	return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}

static void xen_msi_domain_free_irqs(struct irq_domain *domain,
				     struct device *dev)
{
	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
}

static struct msi_domain_ops xen_pci_msi_domain_ops = {
	.domain_alloc_irqs	= xen_msi_domain_alloc_irqs,
	.domain_free_irqs	= xen_msi_domain_free_irqs,
};

static struct msi_domain_info xen_pci_msi_domain_info = {
	.flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
	.ops = &xen_pci_msi_domain_ops,
};

/*
 * This irq domain is a blatant violation of the irq domain design, but
 * disentangling XEN into real irq domains is not a job for mere mortals
 * with limited XENology. But it's the least dangerous way for a mere
 * mortal to get rid of the arch_*_msi_irqs() hackery in order to store
 * the irq domain pointer in struct device. This irq domain wrappery
 * allows doing that without breaking XEN terminally.
 */
static __init struct irq_domain *xen_create_pci_msi_domain(void)
{
	struct irq_domain *d = NULL;
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("XEN-MSI");
	if (fn)
		d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);

	/* FIXME: No idea how to survive if this fails */
	BUG_ON(!d);

	return d;
}

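/* Select the per-mode MSI ops and install the Xen PCI/MSI irq domain. */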
static __init void xen_setup_pci_msi(void)
{
	if (xen_pv_domain()) {
		if (xen_initial_domain())
			xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
		else
			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
	} else if (xen_hvm_domain()) {
		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	/*
	 * Override the PCI/MSI irq domain init function. No point
	 * in allocating the native domain and never using it.
	 */
	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
	/*
	 * With XEN PIRQ/Eventchannels in use, PCI/MSI[-X] masking is solely
	 * controlled by the hypervisor.
	 */
	pci_msi_ignore_mask = 1;
}

#else /* CONFIG_PCI_MSI */
static inline void xen_setup_pci_msi(void) { }
#endif /* CONFIG_PCI_MSI */

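/*
 * PV DomU entry point: route INTx enabling through pcifront and keep
 * ACPI out of the IRQ picture (a PV guest has no ACPI to parse).
 */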
int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

	/* Keep ACPI out of the picture */
	acpi_noirq_set();

	xen_setup_pci_msi();
	return 0;
}

#ifdef CONFIG_PCI_MSI
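/*
 * Runs from x86_platform.apic_post_init so that x2APIC state is known;
 * skips PIRQ-based MSI when the hypervisor virtualizes the (x2)APIC.
 */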
static void __init xen_hvm_msi_init(void)
{
	if (!apic_is_disabled) {
		/*
		 * If hardware supports (x2)APIC virtualization (as indicated
		 * by the hypervisor's CPUID leaf 4) then we don't need to use
		 * pirqs/event channels for MSI handling and can instead use
		 * regular APIC processing.
		 */
		uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);

		if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
		    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
			return;
	}
	xen_setup_pci_msi();
}
#endif

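/* HVM entry point: only takes effect when Xen offers PIRQ-backed GSIs/MSIs. */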
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
	__acpi_unregister_gsi = NULL;
#endif

#ifdef CONFIG_PCI_MSI
	/*
	 * We need to wait until after x2apic is initialized
	 * before we can set MSI IRQ ops.
	 */
	x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
	return 0;
}

#ifdef CONFIG_XEN_PV_DOM0
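/*
 * Dom0 entry point: take over GSI registration and MSI setup, and
 * pre-allocate the legacy IRQ range (see the comment below).
 */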
int __init pci_xen_initial_domain(void)
{
	int irq;

	xen_setup_pci_msi();
	__acpi_register_gsi = acpi_register_gsi_xen;
	__acpi_unregister_gsi = NULL;
	/*
	 * Pre-allocate the legacy IRQs. Use NR_IRQS_LEGACY here
	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
	 */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		int trigger, polarity;

		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
			continue;

		xen_register_pirq(irq,
				  trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
				  true /* Map GSI to PIRQ */);
	}
	if (nr_ioapics == 0) {
		for (irq = 0; irq < nr_legacy_irqs(); irq++)
			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
	}
	return 0;
}
#endif