1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
4 | * Author: Joerg Roedel <jroedel@suse.de> |
5 | * Leo Duran <leo.duran@amd.com> |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) "AMD-Vi: " fmt |
9 | #define dev_fmt(fmt) pr_fmt(fmt) |
10 | |
11 | #include <linux/pci.h> |
12 | #include <linux/acpi.h> |
13 | #include <linux/list.h> |
14 | #include <linux/bitmap.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/syscore_ops.h> |
17 | #include <linux/interrupt.h> |
18 | #include <linux/msi.h> |
19 | #include <linux/irq.h> |
20 | #include <linux/amd-iommu.h> |
21 | #include <linux/export.h> |
22 | #include <linux/kmemleak.h> |
23 | #include <linux/cc_platform.h> |
24 | #include <linux/iopoll.h> |
25 | #include <asm/pci-direct.h> |
26 | #include <asm/iommu.h> |
27 | #include <asm/apic.h> |
28 | #include <asm/gart.h> |
29 | #include <asm/x86_init.h> |
30 | #include <asm/io_apic.h> |
31 | #include <asm/irq_remapping.h> |
32 | #include <asm/set_memory.h> |
33 | #include <asm/sev.h> |
34 | |
35 | #include <linux/crash_dump.h> |
36 | |
37 | #include "amd_iommu.h" |
38 | #include "../irq_remapping.h" |
39 | |
40 | /* |
41 | * definitions for the ACPI scanning code |
42 | */ |
#define IVRS_HEADER_LENGTH 48
44 | |
45 | #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 |
46 | #define ACPI_IVMD_TYPE_ALL 0x20 |
47 | #define ACPI_IVMD_TYPE 0x21 |
48 | #define ACPI_IVMD_TYPE_RANGE 0x22 |
49 | |
50 | #define IVHD_DEV_ALL 0x01 |
51 | #define IVHD_DEV_SELECT 0x02 |
52 | #define IVHD_DEV_SELECT_RANGE_START 0x03 |
53 | #define IVHD_DEV_RANGE_END 0x04 |
54 | #define IVHD_DEV_ALIAS 0x42 |
55 | #define IVHD_DEV_ALIAS_RANGE 0x43 |
56 | #define IVHD_DEV_EXT_SELECT 0x46 |
57 | #define IVHD_DEV_EXT_SELECT_RANGE 0x47 |
58 | #define IVHD_DEV_SPECIAL 0x48 |
59 | #define IVHD_DEV_ACPI_HID 0xf0 |
60 | |
61 | #define UID_NOT_PRESENT 0 |
62 | #define UID_IS_INTEGER 1 |
63 | #define UID_IS_CHARACTER 2 |
64 | |
65 | #define IVHD_SPECIAL_IOAPIC 1 |
66 | #define IVHD_SPECIAL_HPET 2 |
67 | |
68 | #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 |
69 | #define IVHD_FLAG_PASSPW_EN_MASK 0x02 |
70 | #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 |
71 | #define IVHD_FLAG_ISOC_EN_MASK 0x08 |
72 | |
73 | #define IVMD_FLAG_EXCL_RANGE 0x08 |
74 | #define IVMD_FLAG_IW 0x04 |
75 | #define IVMD_FLAG_IR 0x02 |
76 | #define IVMD_FLAG_UNITY_MAP 0x01 |
77 | |
78 | #define ACPI_DEVFLAG_INITPASS 0x01 |
79 | #define ACPI_DEVFLAG_EXTINT 0x02 |
80 | #define ACPI_DEVFLAG_NMI 0x04 |
81 | #define ACPI_DEVFLAG_SYSMGT1 0x10 |
82 | #define ACPI_DEVFLAG_SYSMGT2 0x20 |
83 | #define ACPI_DEVFLAG_LINT0 0x40 |
84 | #define ACPI_DEVFLAG_LINT1 0x80 |
85 | #define ACPI_DEVFLAG_ATSDIS 0x10000000 |
86 | |
87 | #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ |
88 | | ((dev & 0x1f) << 3) | (fn & 0x7)) |
89 | |
90 | /* |
91 | * ACPI table definitions |
92 | * |
93 | * These data structures are laid over the table to parse the important values |
94 | * out of it. |
95 | */ |
96 | |
97 | /* |
98 | * structure describing one IOMMU in the ACPI table. Typically followed by one |
99 | * or more ivhd_entrys. |
100 | */ |
struct ivhd_header {
u8 type;
u8 flags;
u16 length;
u16 devid;
u16 cap_ptr;
u64 mmio_phys;
u16 pci_seg;
u16 info;
u32 efr_attr;

/* Following only valid on IVHD type 11h and 40h */
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
u64 efr_reg2;
} __attribute__((packed));
116 | |
117 | /* |
118 | * A device entry describing which devices a specific IOMMU translates and |
119 | * which requestor ids they use. |
120 | */ |
121 | struct ivhd_entry { |
122 | u8 type; |
123 | u16 devid; |
124 | u8 flags; |
125 | struct_group(ext_hid, |
126 | u32 ext; |
127 | u32 hidh; |
128 | ); |
129 | u64 cid; |
130 | u8 uidf; |
131 | u8 uidl; |
132 | u8 uid; |
133 | } __attribute__((packed)); |
134 | |
135 | /* |
136 | * An AMD IOMMU memory definition structure. It defines things like exclusion |
137 | * ranges for devices and regions that should be unity mapped. |
138 | */ |
struct ivmd_header {
u8 type;
u8 flags;
u16 length;
u16 devid;
u16 aux;
u16 pci_seg;
u8 resv[6];
u64 range_start;
u64 range_length;
} __attribute__((packed));
150 | |
151 | bool amd_iommu_dump; |
152 | bool amd_iommu_irq_remap __read_mostly; |
153 | |
154 | enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1; |
155 | /* Guest page table level */ |
156 | int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL; |
157 | |
158 | int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; |
159 | static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; |
160 | |
161 | static bool amd_iommu_detected; |
162 | static bool amd_iommu_disabled __initdata; |
163 | static bool amd_iommu_force_enable __initdata; |
164 | static bool amd_iommu_irtcachedis; |
165 | static int amd_iommu_target_ivhd_type; |
166 | |
167 | /* Global EFR and EFR2 registers */ |
168 | u64 amd_iommu_efr; |
169 | u64 amd_iommu_efr2; |
170 | |
171 | /* SNP is enabled on the system? */ |
172 | bool amd_iommu_snp_en; |
173 | EXPORT_SYMBOL(amd_iommu_snp_en); |
174 | |
175 | LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ |
176 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the |
177 | system */ |
178 | |
179 | /* Array to assign indices to IOMMUs*/ |
180 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; |
181 | |
182 | /* Number of IOMMUs present in the system */ |
183 | static int amd_iommus_present; |
184 | |
185 | /* IOMMUs have a non-present cache? */ |
186 | bool amd_iommu_np_cache __read_mostly; |
187 | bool amd_iommu_iotlb_sup __read_mostly = true; |
188 | |
189 | static bool amd_iommu_pc_present __read_mostly; |
190 | bool amdr_ivrs_remap_support __read_mostly; |
191 | |
192 | bool amd_iommu_force_isolation __read_mostly; |
193 | |
194 | /* |
195 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap |
196 | * to know which ones are already in use. |
197 | */ |
198 | unsigned long *amd_iommu_pd_alloc_bitmap; |
199 | |
200 | enum iommu_init_state { |
201 | IOMMU_START_STATE, |
202 | IOMMU_IVRS_DETECTED, |
203 | IOMMU_ACPI_FINISHED, |
204 | IOMMU_ENABLED, |
205 | IOMMU_PCI_INIT, |
206 | IOMMU_INTERRUPTS_EN, |
207 | IOMMU_INITIALIZED, |
208 | IOMMU_NOT_FOUND, |
209 | IOMMU_INIT_ERROR, |
210 | IOMMU_CMDLINE_DISABLED, |
211 | }; |
212 | |
213 | /* Early ioapic and hpet maps from kernel command line */ |
214 | #define EARLY_MAP_SIZE 4 |
215 | static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; |
216 | static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; |
217 | static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; |
218 | |
219 | static int __initdata early_ioapic_map_size; |
220 | static int __initdata early_hpet_map_size; |
221 | static int __initdata early_acpihid_map_size; |
222 | |
223 | static bool __initdata cmdline_maps; |
224 | |
225 | static enum iommu_init_state init_state = IOMMU_START_STATE; |
226 | |
227 | static int amd_iommu_enable_interrupts(void); |
228 | static int __init iommu_go_to_state(enum iommu_init_state state); |
229 | static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); |
230 | |
231 | static bool amd_iommu_pre_enabled = true; |
232 | |
233 | static u32 amd_iommu_ivinfo __initdata; |
234 | |
235 | bool translation_pre_enabled(struct amd_iommu *iommu) |
236 | { |
237 | return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); |
238 | } |
239 | |
240 | static void clear_translation_pre_enabled(struct amd_iommu *iommu) |
241 | { |
242 | iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; |
243 | } |
244 | |
245 | static void init_translation_status(struct amd_iommu *iommu) |
246 | { |
247 | u64 ctrl; |
248 | |
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
250 | if (ctrl & (1<<CONTROL_IOMMU_EN)) |
251 | iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; |
252 | } |
253 | |
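/*
 * Compute the allocation size in bytes for (last_bdf + 1) entries of
 * entry_size bytes, rounded up to a whole power-of-two number of pages.
 */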
254 | static inline unsigned long tbl_size(int entry_size, int last_bdf) |
255 | { |
256 | unsigned shift = PAGE_SHIFT + |
get_order((last_bdf + 1) * entry_size);
258 | |
259 | return 1UL << shift; |
260 | } |
261 | |
262 | int amd_iommu_get_num_iommus(void) |
263 | { |
264 | return amd_iommus_present; |
265 | } |
266 | |
267 | /* |
268 | * Iterate through all the IOMMUs to get common EFR |
* masks among all IOMMUs and warn if an inconsistency is found.
270 | */ |
271 | static __init void get_global_efr(void) |
272 | { |
273 | struct amd_iommu *iommu; |
274 | |
275 | for_each_iommu(iommu) { |
276 | u64 tmp = iommu->features; |
277 | u64 tmp2 = iommu->features2; |
278 | |
if (list_is_first(&iommu->list, &amd_iommu_list)) {
280 | amd_iommu_efr = tmp; |
281 | amd_iommu_efr2 = tmp2; |
282 | continue; |
283 | } |
284 | |
285 | if (amd_iommu_efr == tmp && |
286 | amd_iommu_efr2 == tmp2) |
287 | continue; |
288 | |
289 | pr_err(FW_BUG |
290 | "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n" , |
291 | tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, |
292 | iommu->index, iommu->pci_seg->id, |
293 | PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), |
294 | PCI_FUNC(iommu->devid)); |
295 | |
296 | amd_iommu_efr &= tmp; |
297 | amd_iommu_efr2 &= tmp2; |
298 | } |
299 | |
300 | pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n" , amd_iommu_efr, amd_iommu_efr2); |
301 | } |
302 | |
303 | /* |
304 | * For IVHD type 0x11/0x40, EFR is also available via IVHD. |
305 | * Default to IVHD EFR since it is available sooner |
306 | * (i.e. before PCI init). |
307 | */ |
308 | static void __init early_iommu_features_init(struct amd_iommu *iommu, |
309 | struct ivhd_header *h) |
310 | { |
311 | if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) { |
312 | iommu->features = h->efr_reg; |
313 | iommu->features2 = h->efr_reg2; |
314 | } |
315 | if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) |
316 | amdr_ivrs_remap_support = true; |
317 | } |
318 | |
319 | /* Access to l1 and l2 indexed register spaces */ |
320 | |
321 | static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) |
322 | { |
323 | u32 val; |
324 | |
pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
pci_read_config_dword(iommu->dev, 0xfc, &val);
327 | return val; |
328 | } |
329 | |
330 | static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) |
331 | { |
pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
pci_write_config_dword(iommu->dev, 0xfc, val);
pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
335 | } |
336 | |
337 | static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) |
338 | { |
339 | u32 val; |
340 | |
pci_write_config_dword(iommu->dev, 0xf0, address);
pci_read_config_dword(iommu->dev, 0xf4, &val);
343 | return val; |
344 | } |
345 | |
346 | static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) |
347 | { |
pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
pci_write_config_dword(iommu->dev, 0xf4, val);
350 | } |
351 | |
352 | /**************************************************************************** |
353 | * |
354 | * AMD IOMMU MMIO register space handling functions |
355 | * |
356 | * These functions are used to program the IOMMU device registers in |
357 | * MMIO space required for that driver. |
358 | * |
359 | ****************************************************************************/ |
360 | |
361 | /* |
* This function sets the exclusion range in the IOMMU. DMA accesses to the
* exclusion range are passed through untranslated.
364 | */ |
365 | static void iommu_set_exclusion_range(struct amd_iommu *iommu) |
366 | { |
367 | u64 start = iommu->exclusion_start & PAGE_MASK; |
368 | u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; |
369 | u64 entry; |
370 | |
371 | if (!iommu->exclusion_start) |
372 | return; |
373 | |
374 | entry = start | MMIO_EXCL_ENABLE_MASK; |
375 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, |
376 | &entry, sizeof(entry)); |
377 | |
378 | entry = limit; |
379 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, |
380 | &entry, sizeof(entry)); |
381 | } |
382 | |
383 | static void iommu_set_cwwb_range(struct amd_iommu *iommu) |
384 | { |
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
386 | u64 entry = start & PM_ADDR_MASK; |
387 | |
388 | if (!check_feature(FEATURE_SNP)) |
389 | return; |
390 | |
391 | /* Note: |
392 | * Re-purpose Exclusion base/limit registers for Completion wait |
393 | * write-back base/limit. |
394 | */ |
395 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, |
396 | &entry, sizeof(entry)); |
397 | |
398 | /* Note: |
399 | * Default to 4 Kbytes, which can be specified by setting base |
400 | * address equal to the limit address. |
401 | */ |
402 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, |
403 | &entry, sizeof(entry)); |
404 | } |
405 | |
406 | /* Programs the physical address of the device table into the IOMMU hardware */ |
407 | static void iommu_set_device_table(struct amd_iommu *iommu) |
408 | { |
409 | u64 entry; |
410 | u32 dev_table_size = iommu->pci_seg->dev_table_size; |
411 | void *dev_table = (void *)get_dev_table(iommu); |
412 | |
413 | BUG_ON(iommu->mmio_base == NULL); |
414 | |
entry = iommu_virt_to_phys(dev_table);
416 | entry |= (dev_table_size >> 12) - 1; |
417 | memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, |
418 | &entry, sizeof(entry)); |
419 | } |
420 | |
421 | /* Generic functions to enable/disable certain features of the IOMMU. */ |
422 | static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) |
423 | { |
424 | u64 ctrl; |
425 | |
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl |= (1ULL << bit);
writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
429 | } |
430 | |
431 | static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) |
432 | { |
433 | u64 ctrl; |
434 | |
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl &= ~(1ULL << bit);
writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
438 | } |
439 | |
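/* Program the command/invalidation timeout field in the IOMMU control register. */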
440 | static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) |
441 | { |
442 | u64 ctrl; |
443 | |
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl &= ~CTRL_INV_TO_MASK;
ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
448 | } |
449 | |
450 | /* Function to enable the hardware */ |
451 | static void iommu_enable(struct amd_iommu *iommu) |
452 | { |
453 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
454 | } |
455 | |
456 | static void iommu_disable(struct amd_iommu *iommu) |
457 | { |
458 | if (!iommu->mmio_base) |
459 | return; |
460 | |
461 | /* Disable command buffer */ |
462 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); |
463 | |
464 | /* Disable event logging and event interrupts */ |
465 | iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); |
466 | iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); |
467 | |
468 | /* Disable IOMMU GA_LOG */ |
469 | iommu_feature_disable(iommu, CONTROL_GALOG_EN); |
470 | iommu_feature_disable(iommu, CONTROL_GAINT_EN); |
471 | |
472 | /* Disable IOMMU PPR logging */ |
473 | iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); |
474 | iommu_feature_disable(iommu, CONTROL_PPRINT_EN); |
475 | |
476 | /* Disable IOMMU hardware itself */ |
477 | iommu_feature_disable(iommu, CONTROL_IOMMU_EN); |
478 | |
479 | /* Clear IRTE cache disabling bit */ |
480 | iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); |
481 | } |
482 | |
483 | /* |
484 | * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in |
485 | * the system has one. |
486 | */ |
487 | static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) |
488 | { |
if (!request_mem_region(address, end, "amd_iommu")) {
pr_err("Can not reserve memory region %llx-%llx for mmio\n",
address, end);
pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
493 | return NULL; |
494 | } |
495 | |
return (u8 __iomem *)ioremap(address, end);
497 | } |
498 | |
499 | static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) |
500 | { |
501 | if (iommu->mmio_base) |
iounmap(iommu->mmio_base);
503 | release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); |
504 | } |
505 | |
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
507 | { |
508 | u32 size = 0; |
509 | |
510 | switch (h->type) { |
511 | case 0x10: |
512 | size = 24; |
513 | break; |
514 | case 0x11: |
515 | case 0x40: |
516 | size = 40; |
517 | break; |
518 | } |
519 | return size; |
520 | } |
521 | |
522 | /**************************************************************************** |
523 | * |
524 | * The functions below belong to the first pass of AMD IOMMU ACPI table |
525 | * parsing. In this pass we try to find out the highest device id this |
526 | * code has to handle. Upon this information the size of the shared data |
527 | * structures is determined later. |
528 | * |
529 | ****************************************************************************/ |
530 | |
531 | /* |
532 | * This function calculates the length of a given IVHD entry |
533 | */ |
534 | static inline int ivhd_entry_length(u8 *ivhd) |
535 | { |
536 | u32 type = ((struct ivhd_entry *)ivhd)->type; |
537 | |
538 | if (type < 0x80) { |
539 | return 0x04 << (*ivhd >> 6); |
540 | } else if (type == IVHD_DEV_ACPI_HID) { |
541 | /* For ACPI_HID, offset 21 is uid len */ |
542 | return *((u8 *)ivhd + 21) + 22; |
543 | } |
544 | return 0; |
545 | } |
546 | |
547 | /* |
548 | * After reading the highest device id from the IOMMU PCI capability header |
549 | * this function looks if there is a higher device id defined in the ACPI table |
550 | */ |
551 | static int __init find_last_devid_from_ivhd(struct ivhd_header *h) |
552 | { |
553 | u8 *p = (void *)h, *end = (void *)h; |
554 | struct ivhd_entry *dev; |
555 | int last_devid = -EINVAL; |
556 | |
557 | u32 ivhd_size = get_ivhd_header_size(h); |
558 | |
559 | if (!ivhd_size) { |
560 | pr_err("Unsupported IVHD type %#x\n" , h->type); |
561 | return -EINVAL; |
562 | } |
563 | |
564 | p += ivhd_size; |
565 | end += h->length; |
566 | |
567 | while (p < end) { |
568 | dev = (struct ivhd_entry *)p; |
569 | switch (dev->type) { |
570 | case IVHD_DEV_ALL: |
571 | /* Use maximum BDF value for DEV_ALL */ |
572 | return 0xffff; |
573 | case IVHD_DEV_SELECT: |
574 | case IVHD_DEV_RANGE_END: |
575 | case IVHD_DEV_ALIAS: |
576 | case IVHD_DEV_EXT_SELECT: |
577 | /* all the above subfield types refer to device ids */ |
578 | if (dev->devid > last_devid) |
579 | last_devid = dev->devid; |
580 | break; |
581 | default: |
582 | break; |
583 | } |
p += ivhd_entry_length(p);
585 | } |
586 | |
587 | WARN_ON(p != end); |
588 | |
589 | return last_devid; |
590 | } |
591 | |
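/*
 * Validate the IVRS table checksum: the bytes of an ACPI table must sum
 * to zero modulo 256, otherwise the table is considered corrupt.
 */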
592 | static int __init check_ivrs_checksum(struct acpi_table_header *table) |
593 | { |
594 | int i; |
595 | u8 checksum = 0, *p = (u8 *)table; |
596 | |
597 | for (i = 0; i < table->length; ++i) |
598 | checksum += p[i]; |
599 | if (checksum != 0) { |
600 | /* ACPI table corrupt */ |
601 | pr_err(FW_BUG "IVRS invalid checksum\n" ); |
602 | return -ENODEV; |
603 | } |
604 | |
605 | return 0; |
606 | } |
607 | |
608 | /* |
609 | * Iterate over all IVHD entries in the ACPI table and find the highest device |
610 | * id which we need to handle. This is the first of three functions which parse |
611 | * the ACPI table. So we check the checksum here. |
612 | */ |
613 | static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) |
614 | { |
615 | u8 *p = (u8 *)table, *end = (u8 *)table; |
616 | struct ivhd_header *h; |
617 | int last_devid, last_bdf = 0; |
618 | |
619 | p += IVRS_HEADER_LENGTH; |
620 | |
621 | end += table->length; |
622 | while (p < end) { |
623 | h = (struct ivhd_header *)p; |
624 | if (h->pci_seg == pci_seg && |
625 | h->type == amd_iommu_target_ivhd_type) { |
626 | last_devid = find_last_devid_from_ivhd(h); |
627 | |
628 | if (last_devid < 0) |
629 | return -EINVAL; |
630 | if (last_devid > last_bdf) |
631 | last_bdf = last_devid; |
632 | } |
633 | p += h->length; |
634 | } |
635 | WARN_ON(p != end); |
636 | |
637 | return last_bdf; |
638 | } |
639 | |
640 | /**************************************************************************** |
641 | * |
642 | * The following functions belong to the code path which parses the ACPI table |
643 | * the second time. In this ACPI parsing iteration we allocate IOMMU specific |
644 | * data structures, initialize the per PCI segment device/alias/rlookup table |
645 | * and also basically initialize the hardware. |
646 | * |
647 | ****************************************************************************/ |
648 | |
649 | /* Allocate per PCI segment device table */ |
650 | static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) |
651 | { |
652 | pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, |
get_order(pci_seg->dev_table_size));
654 | if (!pci_seg->dev_table) |
655 | return -ENOMEM; |
656 | |
657 | return 0; |
658 | } |
659 | |
660 | static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) |
661 | { |
free_pages((unsigned long)pci_seg->dev_table,
get_order(pci_seg->dev_table_size));
664 | pci_seg->dev_table = NULL; |
665 | } |
666 | |
667 | /* Allocate per PCI segment IOMMU rlookup table. */ |
668 | static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) |
669 | { |
670 | pci_seg->rlookup_table = (void *)__get_free_pages( |
671 | GFP_KERNEL | __GFP_ZERO, |
get_order(pci_seg->rlookup_table_size));
673 | if (pci_seg->rlookup_table == NULL) |
674 | return -ENOMEM; |
675 | |
676 | return 0; |
677 | } |
678 | |
679 | static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) |
680 | { |
free_pages((unsigned long)pci_seg->rlookup_table,
get_order(pci_seg->rlookup_table_size));
683 | pci_seg->rlookup_table = NULL; |
684 | } |
685 | |
686 | static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) |
687 | { |
688 | pci_seg->irq_lookup_table = (void *)__get_free_pages( |
689 | GFP_KERNEL | __GFP_ZERO, |
get_order(pci_seg->rlookup_table_size));
kmemleak_alloc(pci_seg->irq_lookup_table,
pci_seg->rlookup_table_size, 1, GFP_KERNEL);
693 | if (pci_seg->irq_lookup_table == NULL) |
694 | return -ENOMEM; |
695 | |
696 | return 0; |
697 | } |
698 | |
699 | static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) |
700 | { |
kmemleak_free(pci_seg->irq_lookup_table);
free_pages((unsigned long)pci_seg->irq_lookup_table,
get_order(pci_seg->rlookup_table_size));
704 | pci_seg->irq_lookup_table = NULL; |
705 | } |
706 | |
707 | static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) |
708 | { |
709 | int i; |
710 | |
711 | pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL, |
get_order(pci_seg->alias_table_size));
713 | if (!pci_seg->alias_table) |
714 | return -ENOMEM; |
715 | |
716 | /* |
* let all alias entries point to themselves
718 | */ |
719 | for (i = 0; i <= pci_seg->last_bdf; ++i) |
720 | pci_seg->alias_table[i] = i; |
721 | |
722 | return 0; |
723 | } |
724 | |
725 | static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) |
726 | { |
free_pages((unsigned long)pci_seg->alias_table,
get_order(pci_seg->alias_table_size));
729 | pci_seg->alias_table = NULL; |
730 | } |
731 | |
732 | /* |
733 | * Allocates the command buffer. This buffer is per AMD IOMMU. We can |
734 | * write commands to that buffer later and the IOMMU will execute them |
735 | * asynchronously |
736 | */ |
737 | static int __init alloc_command_buffer(struct amd_iommu *iommu) |
738 | { |
739 | iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
get_order(CMD_BUFFER_SIZE));
741 | |
742 | return iommu->cmd_buf ? 0 : -ENOMEM; |
743 | } |
744 | |
745 | /* |
746 | * Interrupt handler has processed all pending events and adjusted head |
747 | * and tail pointer. Reset overflow mask and restart logging again. |
748 | */ |
749 | static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, |
750 | u8 cntrl_intr, u8 cntrl_log, |
751 | u32 status_run_mask, u32 status_overflow_mask) |
752 | { |
753 | u32 status; |
754 | |
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
756 | if (status & status_run_mask) |
757 | return; |
758 | |
759 | pr_info_ratelimited("IOMMU %s log restarting\n" , evt_type); |
760 | |
iommu_feature_disable(iommu, cntrl_log);
iommu_feature_disable(iommu, cntrl_intr);

writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

iommu_feature_enable(iommu, cntrl_intr);
iommu_feature_enable(iommu, cntrl_log);
768 | } |
769 | |
770 | /* |
771 | * This function restarts event logging in case the IOMMU experienced |
772 | * an event log buffer overflow. |
773 | */ |
774 | void amd_iommu_restart_event_logging(struct amd_iommu *iommu) |
775 | { |
amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
777 | CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, |
778 | MMIO_STATUS_EVT_OVERFLOW_MASK); |
779 | } |
780 | |
781 | /* |
782 | * This function restarts event logging in case the IOMMU experienced |
783 | * GA log overflow. |
784 | */ |
785 | void amd_iommu_restart_ga_log(struct amd_iommu *iommu) |
786 | { |
amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
788 | CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, |
789 | MMIO_STATUS_GALOG_OVERFLOW_MASK); |
790 | } |
791 | |
792 | /* |
793 | * This function restarts ppr logging in case the IOMMU experienced |
794 | * PPR log overflow. |
795 | */ |
796 | void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) |
797 | { |
amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
799 | CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, |
800 | MMIO_STATUS_PPR_OVERFLOW_MASK); |
801 | } |
802 | |
803 | /* |
804 | * This function resets the command buffer if the IOMMU stopped fetching |
805 | * commands from it. |
806 | */ |
807 | static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) |
808 | { |
809 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); |
810 | |
writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
813 | iommu->cmd_buf_head = 0; |
814 | iommu->cmd_buf_tail = 0; |
815 | |
816 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); |
817 | } |
818 | |
819 | /* |
820 | * This function writes the command buffer address to the hardware and |
821 | * enables it. |
822 | */ |
823 | static void iommu_enable_command_buffer(struct amd_iommu *iommu) |
824 | { |
825 | u64 entry; |
826 | |
827 | BUG_ON(iommu->cmd_buf == NULL); |
828 | |
entry = iommu_virt_to_phys(iommu->cmd_buf);
830 | entry |= MMIO_CMD_SIZE_512; |
831 | |
832 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
833 | &entry, sizeof(entry)); |
834 | |
835 | amd_iommu_reset_cmd_buffer(iommu); |
836 | } |
837 | |
838 | /* |
839 | * This function disables the command buffer |
840 | */ |
841 | static void iommu_disable_command_buffer(struct amd_iommu *iommu) |
842 | { |
843 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); |
844 | } |
845 | |
846 | static void __init free_command_buffer(struct amd_iommu *iommu) |
847 | { |
free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
849 | } |
850 | |
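/*
 * Allocate pages for an IOMMU buffer. When SNP support is present the
 * buffer must be mapped with 4K pages; if set_memory_4k() fails the
 * allocation is released and NULL is returned.
 */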
851 | static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, |
852 | gfp_t gfp, size_t size) |
853 | { |
854 | int order = get_order(size); |
void *buf = (void *)__get_free_pages(gfp, order);
856 | |
857 | if (buf && |
858 | check_feature(FEATURE_SNP) && |
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
861 | buf = NULL; |
862 | } |
863 | |
864 | return buf; |
865 | } |
866 | |
867 | /* allocates the memory where the IOMMU will log its events to */ |
868 | static int __init alloc_event_buffer(struct amd_iommu *iommu) |
869 | { |
870 | iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, |
871 | EVT_BUFFER_SIZE); |
872 | |
873 | return iommu->evt_buf ? 0 : -ENOMEM; |
874 | } |
875 | |
876 | static void iommu_enable_event_buffer(struct amd_iommu *iommu) |
877 | { |
878 | u64 entry; |
879 | |
880 | BUG_ON(iommu->evt_buf == NULL); |
881 | |
entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
883 | |
884 | memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, |
885 | &entry, sizeof(entry)); |
886 | |
887 | /* set head and tail to zero manually */ |
writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
890 | |
891 | iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); |
892 | } |
893 | |
894 | /* |
895 | * This function disables the event log buffer |
896 | */ |
897 | static void iommu_disable_event_buffer(struct amd_iommu *iommu) |
898 | { |
899 | iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); |
900 | } |
901 | |
902 | static void __init free_event_buffer(struct amd_iommu *iommu) |
903 | { |
free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
905 | } |
906 | |
/* allocates the memory where the IOMMU will log peripheral page requests to */
908 | static int __init alloc_ppr_log(struct amd_iommu *iommu) |
909 | { |
910 | iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, |
911 | PPR_LOG_SIZE); |
912 | |
913 | return iommu->ppr_log ? 0 : -ENOMEM; |
914 | } |
915 | |
916 | static void iommu_enable_ppr_log(struct amd_iommu *iommu) |
917 | { |
918 | u64 entry; |
919 | |
920 | if (iommu->ppr_log == NULL) |
921 | return; |
922 | |
923 | iommu_feature_enable(iommu, CONTROL_PPR_EN); |
924 | |
entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
926 | |
927 | memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, |
928 | &entry, sizeof(entry)); |
929 | |
930 | /* set head and tail to zero manually */ |
writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
933 | |
934 | iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); |
935 | iommu_feature_enable(iommu, CONTROL_PPRINT_EN); |
936 | } |
937 | |
938 | static void __init free_ppr_log(struct amd_iommu *iommu) |
939 | { |
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
941 | } |
942 | |
943 | static void free_ga_log(struct amd_iommu *iommu) |
944 | { |
945 | #ifdef CONFIG_IRQ_REMAP |
free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
948 | #endif |
949 | } |
950 | |
951 | #ifdef CONFIG_IRQ_REMAP |
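/*
 * Program the GA log base and tail pointer registers, enable the GA log
 * and its interrupt, and wait for the hardware to report the log as running.
 */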
952 | static int iommu_ga_log_enable(struct amd_iommu *iommu) |
953 | { |
954 | u32 status, i; |
955 | u64 entry; |
956 | |
957 | if (!iommu->ga_log) |
958 | return -EINVAL; |
959 | |
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
961 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, |
962 | &entry, sizeof(entry)); |
entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
964 | (BIT_ULL(52)-1)) & ~7ULL; |
965 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, |
966 | &entry, sizeof(entry)); |
writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
969 | |
970 | |
971 | iommu_feature_enable(iommu, CONTROL_GAINT_EN); |
972 | iommu_feature_enable(iommu, CONTROL_GALOG_EN); |
973 | |
974 | for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { |
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
976 | if (status & (MMIO_STATUS_GALOG_RUN_MASK)) |
977 | break; |
978 | udelay(10); |
979 | } |
980 | |
981 | if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) |
982 | return -EINVAL; |
983 | |
984 | return 0; |
985 | } |
986 | |
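/*
 * Allocate the GA (guest APIC) log buffer and its tail pointer page.
 * Only needed when guest vAPIC interrupt mode is in use.
 */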
987 | static int iommu_init_ga_log(struct amd_iommu *iommu) |
988 | { |
989 | if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) |
990 | return 0; |
991 | |
992 | iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
get_order(GA_LOG_SIZE));
994 | if (!iommu->ga_log) |
995 | goto err_out; |
996 | |
997 | iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
get_order(8));
999 | if (!iommu->ga_log_tail) |
1000 | goto err_out; |
1001 | |
1002 | return 0; |
1003 | err_out: |
1004 | free_ga_log(iommu); |
1005 | return -EINVAL; |
1006 | } |
1007 | #endif /* CONFIG_IRQ_REMAP */ |
1008 | |
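/* Allocate the 4K page used as the completion-wait write-back semaphore. */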
1009 | static int __init alloc_cwwb_sem(struct amd_iommu *iommu) |
1010 | { |
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
1012 | |
1013 | return iommu->cmd_sem ? 0 : -ENOMEM; |
1014 | } |
1015 | |
1016 | static void __init free_cwwb_sem(struct amd_iommu *iommu) |
1017 | { |
1018 | if (iommu->cmd_sem) |
1019 | free_page((unsigned long)iommu->cmd_sem); |
1020 | } |
1021 | |
1022 | static void iommu_enable_xt(struct amd_iommu *iommu) |
1023 | { |
1024 | #ifdef CONFIG_IRQ_REMAP |
1025 | /* |
1026 | * XT mode (32-bit APIC destination ID) requires |
1027 | * GA mode (128-bit IRTE support) as a prerequisite. |
1028 | */ |
1029 | if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && |
1030 | amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) |
1031 | iommu_feature_enable(iommu, CONTROL_XT_EN); |
1032 | #endif /* CONFIG_IRQ_REMAP */ |
1033 | } |
1034 | |
1035 | static void iommu_enable_gt(struct amd_iommu *iommu) |
1036 | { |
1037 | if (!check_feature(FEATURE_GT)) |
1038 | return; |
1039 | |
1040 | iommu_feature_enable(iommu, CONTROL_GT_EN); |
1041 | } |
1042 | |
1043 | /* sets a specific bit in the device table entry. */ |
1044 | static void __set_dev_entry_bit(struct dev_table_entry *dev_table, |
1045 | u16 devid, u8 bit) |
1046 | { |
1047 | int i = (bit >> 6) & 0x03; |
1048 | int _bit = bit & 0x3f; |
1049 | |
1050 | dev_table[devid].data[i] |= (1UL << _bit); |
1051 | } |
1052 | |
1053 | static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) |
1054 | { |
1055 | struct dev_table_entry *dev_table = get_dev_table(iommu); |
1056 | |
1057 | return __set_dev_entry_bit(dev_table, devid, bit); |
1058 | } |
1059 | |
1060 | static int __get_dev_entry_bit(struct dev_table_entry *dev_table, |
1061 | u16 devid, u8 bit) |
1062 | { |
1063 | int i = (bit >> 6) & 0x03; |
1064 | int _bit = bit & 0x3f; |
1065 | |
1066 | return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit; |
1067 | } |
1068 | |
1069 | static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) |
1070 | { |
1071 | struct dev_table_entry *dev_table = get_dev_table(iommu); |
1072 | |
1073 | return __get_dev_entry_bit(dev_table, devid, bit); |
1074 | } |
1075 | |
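/*
 * Copy the device table programmed by a previous kernel (kdump case) into
 * a newly allocated table, preserving valid DTEs and their interrupt
 * remapping settings while masking out stale GCR3 table pointers.
 */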
1076 | static bool __copy_device_table(struct amd_iommu *iommu) |
1077 | { |
1078 | u64 int_ctl, int_tab_len, entry = 0; |
1079 | struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; |
1080 | struct dev_table_entry *old_devtb = NULL; |
1081 | u32 lo, hi, devid, old_devtb_size; |
1082 | phys_addr_t old_devtb_phys; |
1083 | u16 dom_id, dte_v, irq_v; |
1084 | gfp_t gfp_flag; |
1085 | u64 tmp; |
1086 | |
/* Each IOMMU uses a separate device table with the same size */
lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
1090 | entry = (((u64) hi) << 32) + lo; |
1091 | |
1092 | old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; |
1093 | if (old_devtb_size != pci_seg->dev_table_size) { |
1094 | pr_err("The device table size of IOMMU:%d is not expected!\n" , |
1095 | iommu->index); |
1096 | return false; |
1097 | } |
1098 | |
1099 | /* |
1100 | * When SME is enabled in the first kernel, the entry includes the |
* memory encryption mask (sme_me_mask), so we must remove the memory
* encryption mask to obtain the true physical address in the kdump kernel.
1103 | */ |
1104 | old_devtb_phys = __sme_clr(entry) & PAGE_MASK; |
1105 | |
1106 | if (old_devtb_phys >= 0x100000000ULL) { |
1107 | pr_err("The address of old device table is above 4G, not trustworthy!\n" ); |
1108 | return false; |
1109 | } |
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
pci_seg->dev_table_size)
: memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
1114 | |
1115 | if (!old_devtb) |
1116 | return false; |
1117 | |
1118 | gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; |
pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
get_order(pci_seg->dev_table_size));
1121 | if (pci_seg->old_dev_tbl_cpy == NULL) { |
1122 | pr_err("Failed to allocate memory for copying old device table!\n" ); |
1123 | memunmap(addr: old_devtb); |
1124 | return false; |
1125 | } |
1126 | |
1127 | for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { |
1128 | pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; |
1129 | dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; |
1130 | dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; |
1131 | |
1132 | if (dte_v && dom_id) { |
1133 | pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; |
1134 | pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; |
1135 | __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); |
1136 | /* If gcr3 table existed, mask it out */ |
1137 | if (old_devtb[devid].data[0] & DTE_FLAG_GV) { |
1138 | tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; |
1139 | tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; |
1140 | pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; |
1141 | tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; |
1142 | tmp |= DTE_FLAG_GV; |
1143 | pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; |
1144 | } |
1145 | } |
1146 | |
1147 | irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; |
1148 | int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; |
1149 | int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; |
1150 | if (irq_v && (int_ctl || int_tab_len)) { |
1151 | if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || |
1152 | (int_tab_len != DTE_INTTABLEN)) { |
1153 | pr_err("Wrong old irq remapping flag: %#x\n" , devid); |
1154 | memunmap(addr: old_devtb); |
1155 | return false; |
1156 | } |
1157 | |
1158 | pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; |
1159 | } |
1160 | } |
memunmap(old_devtb);
1162 | |
1163 | return true; |
1164 | } |
1165 | |
1166 | static bool copy_device_table(void) |
1167 | { |
1168 | struct amd_iommu *iommu; |
1169 | struct amd_iommu_pci_seg *pci_seg; |
1170 | |
1171 | if (!amd_iommu_pre_enabled) |
1172 | return false; |
1173 | |
1174 | pr_warn("Translation is already enabled - trying to copy translation structures\n" ); |
1175 | |
1176 | /* |
* All IOMMUs within a PCI segment share a common device table.
1178 | * Hence copy device table only once per PCI segment. |
1179 | */ |
1180 | for_each_pci_segment(pci_seg) { |
1181 | for_each_iommu(iommu) { |
1182 | if (pci_seg->id != iommu->pci_seg->id) |
1183 | continue; |
1184 | if (!__copy_device_table(iommu)) |
1185 | return false; |
1186 | break; |
1187 | } |
1188 | } |
1189 | |
1190 | return true; |
1191 | } |
1192 | |
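/*
 * Erratum 63 workaround: if a device's SysMgt field is 01b, also set the
 * IW (write permission) bit in its device table entry.
 */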
1193 | void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid) |
1194 | { |
1195 | int sysmgt; |
1196 | |
1197 | sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) | |
1198 | (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1); |
1199 | |
1200 | if (sysmgt == 0x01) |
1201 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW); |
1202 | } |
1203 | |
1204 | /* |
1205 | * This function takes the device specific flags read from the ACPI |
1206 | * table and sets up the device table entry with that information |
1207 | */ |
1208 | static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, |
1209 | u16 devid, u32 flags, u32 ext_flags) |
1210 | { |
1211 | if (flags & ACPI_DEVFLAG_INITPASS) |
1212 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS); |
1213 | if (flags & ACPI_DEVFLAG_EXTINT) |
1214 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS); |
1215 | if (flags & ACPI_DEVFLAG_NMI) |
1216 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS); |
1217 | if (flags & ACPI_DEVFLAG_SYSMGT1) |
1218 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1); |
1219 | if (flags & ACPI_DEVFLAG_SYSMGT2) |
1220 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2); |
1221 | if (flags & ACPI_DEVFLAG_LINT0) |
1222 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS); |
1223 | if (flags & ACPI_DEVFLAG_LINT1) |
1224 | set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS); |
1225 | |
1226 | amd_iommu_apply_erratum_63(iommu, devid); |
1227 | |
1228 | amd_iommu_set_rlookup_table(iommu, devid); |
1229 | } |
1230 | |
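/*
 * Record an IVHD special-device (IOAPIC/HPET) id to device-id mapping.
 * A matching command-line override takes precedence over the IVRS value.
 */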
1231 | int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) |
1232 | { |
1233 | struct devid_map *entry; |
1234 | struct list_head *list; |
1235 | |
1236 | if (type == IVHD_SPECIAL_IOAPIC) |
1237 | list = &ioapic_map; |
1238 | else if (type == IVHD_SPECIAL_HPET) |
1239 | list = &hpet_map; |
1240 | else |
1241 | return -EINVAL; |
1242 | |
1243 | list_for_each_entry(entry, list, list) { |
1244 | if (!(entry->id == id && entry->cmd_line)) |
1245 | continue; |
1246 | |
1247 | pr_info("Command-line override present for %s id %d - ignoring\n" , |
1248 | type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET" , id); |
1249 | |
1250 | *devid = entry->devid; |
1251 | |
1252 | return 0; |
1253 | } |
1254 | |
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1256 | if (!entry) |
1257 | return -ENOMEM; |
1258 | |
1259 | entry->id = id; |
1260 | entry->devid = *devid; |
1261 | entry->cmd_line = cmd_line; |
1262 | |
list_add_tail(&entry->list, list);
1264 | |
1265 | return 0; |
1266 | } |
1267 | |
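/*
 * Record an ACPI HID/UID to device-id mapping, giving precedence to a
 * matching command-line override if one was specified.
 */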
1268 | static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, |
1269 | bool cmd_line) |
1270 | { |
1271 | struct acpihid_map_entry *entry; |
1272 | struct list_head *list = &acpihid_map; |
1273 | |
1274 | list_for_each_entry(entry, list, list) { |
1275 | if (strcmp(entry->hid, hid) || |
1276 | (*uid && *entry->uid && strcmp(entry->uid, uid)) || |
1277 | !entry->cmd_line) |
1278 | continue; |
1279 | |
1280 | pr_info("Command-line override for hid:%s uid:%s\n" , |
1281 | hid, uid); |
1282 | *devid = entry->devid; |
1283 | return 0; |
1284 | } |
1285 | |
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1287 | if (!entry) |
1288 | return -ENOMEM; |
1289 | |
1290 | memcpy(entry->uid, uid, strlen(uid)); |
1291 | memcpy(entry->hid, hid, strlen(hid)); |
1292 | entry->devid = *devid; |
1293 | entry->cmd_line = cmd_line; |
1294 | entry->root_devid = (entry->devid & (~0x7)); |
1295 | |
1296 | pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n" , |
1297 | entry->cmd_line ? "cmd" : "ivrs" , |
1298 | entry->hid, entry->uid, entry->root_devid); |
1299 | |
list_add_tail(&entry->list, list);
1301 | return 0; |
1302 | } |
1303 | |
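/* Register the IOAPIC, HPET and ACPI HID maps collected from the kernel command line. */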
1304 | static int __init add_early_maps(void) |
1305 | { |
1306 | int i, ret; |
1307 | |
1308 | for (i = 0; i < early_ioapic_map_size; ++i) { |
1309 | ret = add_special_device(IVHD_SPECIAL_IOAPIC, |
early_ioapic_map[i].id,
&early_ioapic_map[i].devid,
early_ioapic_map[i].cmd_line);
1313 | if (ret) |
1314 | return ret; |
1315 | } |
1316 | |
1317 | for (i = 0; i < early_hpet_map_size; ++i) { |
1318 | ret = add_special_device(IVHD_SPECIAL_HPET, |
early_hpet_map[i].id,
&early_hpet_map[i].devid,
early_hpet_map[i].cmd_line);
1322 | if (ret) |
1323 | return ret; |
1324 | } |
1325 | |
1326 | for (i = 0; i < early_acpihid_map_size; ++i) { |
ret = add_acpi_hid_device(early_acpihid_map[i].hid,
early_acpihid_map[i].uid,
&early_acpihid_map[i].devid,
early_acpihid_map[i].cmd_line);
1331 | if (ret) |
1332 | return ret; |
1333 | } |
1334 | |
1335 | return 0; |
1336 | } |
1337 | |
1338 | /* |
1339 | * Takes a pointer to an AMD IOMMU entry in the ACPI table and |
1340 | * initializes the hardware and our data structures with it. |
1341 | */ |
1342 | static int __init init_iommu_from_acpi(struct amd_iommu *iommu, |
1343 | struct ivhd_header *h) |
1344 | { |
1345 | u8 *p = (u8 *)h; |
1346 | u8 *end = p, flags = 0; |
1347 | u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; |
1348 | u32 dev_i, ext_flags = 0; |
1349 | bool alias = false; |
1350 | struct ivhd_entry *e; |
1351 | struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; |
1352 | u32 ivhd_size; |
1353 | int ret; |
1354 | |
1355 | |
1356 | ret = add_early_maps(); |
1357 | if (ret) |
1358 | return ret; |
1359 | |
1360 | amd_iommu_apply_ivrs_quirks(); |
1361 | |
1362 | /* |
1363 | * First save the recommended feature enable bits from ACPI |
1364 | */ |
1365 | iommu->acpi_flags = h->flags; |
1366 | |
1367 | /* |
1368 | * Done. Now parse the device entries |
1369 | */ |
1370 | ivhd_size = get_ivhd_header_size(h); |
1371 | if (!ivhd_size) { |
1372 | pr_err("Unsupported IVHD type %#x\n" , h->type); |
1373 | return -EINVAL; |
1374 | } |
1375 | |
1376 | p += ivhd_size; |
1377 | |
1378 | end += h->length; |
1379 | |
1380 | |
1381 | while (p < end) { |
1382 | e = (struct ivhd_entry *)p; |
1383 | seg_id = pci_seg->id; |
1384 | |
1385 | switch (e->type) { |
1386 | case IVHD_DEV_ALL: |
1387 | |
1388 | DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n" , e->flags); |
1389 | |
1390 | for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) |
set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1392 | break; |
1393 | case IVHD_DEV_SELECT: |
1394 | |
1395 | DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x " |
1396 | "flags: %02x\n" , |
1397 | seg_id, PCI_BUS_NUM(e->devid), |
1398 | PCI_SLOT(e->devid), |
1399 | PCI_FUNC(e->devid), |
1400 | e->flags); |
1401 | |
1402 | devid = e->devid; |
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1404 | break; |
1405 | case IVHD_DEV_SELECT_RANGE_START: |
1406 | |
1407 | DUMP_printk(" DEV_SELECT_RANGE_START\t " |
1408 | "devid: %04x:%02x:%02x.%x flags: %02x\n" , |
1409 | seg_id, PCI_BUS_NUM(e->devid), |
1410 | PCI_SLOT(e->devid), |
1411 | PCI_FUNC(e->devid), |
1412 | e->flags); |
1413 | |
1414 | devid_start = e->devid; |
1415 | flags = e->flags; |
1416 | ext_flags = 0; |
1417 | alias = false; |
1418 | break; |
1419 | case IVHD_DEV_ALIAS: |
1420 | |
1421 | DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x " |
1422 | "flags: %02x devid_to: %02x:%02x.%x\n" , |
1423 | seg_id, PCI_BUS_NUM(e->devid), |
1424 | PCI_SLOT(e->devid), |
1425 | PCI_FUNC(e->devid), |
1426 | e->flags, |
1427 | PCI_BUS_NUM(e->ext >> 8), |
1428 | PCI_SLOT(e->ext >> 8), |
1429 | PCI_FUNC(e->ext >> 8)); |
1430 | |
1431 | devid = e->devid; |
1432 | devid_to = e->ext >> 8; |
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1435 | pci_seg->alias_table[devid] = devid_to; |
1436 | break; |
1437 | case IVHD_DEV_ALIAS_RANGE: |
1438 | |
1439 | DUMP_printk(" DEV_ALIAS_RANGE\t\t " |
1440 | "devid: %04x:%02x:%02x.%x flags: %02x " |
1441 | "devid_to: %04x:%02x:%02x.%x\n" , |
1442 | seg_id, PCI_BUS_NUM(e->devid), |
1443 | PCI_SLOT(e->devid), |
1444 | PCI_FUNC(e->devid), |
1445 | e->flags, |
1446 | seg_id, PCI_BUS_NUM(e->ext >> 8), |
1447 | PCI_SLOT(e->ext >> 8), |
1448 | PCI_FUNC(e->ext >> 8)); |
1449 | |
1450 | devid_start = e->devid; |
1451 | flags = e->flags; |
1452 | devid_to = e->ext >> 8; |
1453 | ext_flags = 0; |
1454 | alias = true; |
1455 | break; |
1456 | case IVHD_DEV_EXT_SELECT: |
1457 | |
1458 | DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x " |
1459 | "flags: %02x ext: %08x\n" , |
1460 | seg_id, PCI_BUS_NUM(e->devid), |
1461 | PCI_SLOT(e->devid), |
1462 | PCI_FUNC(e->devid), |
1463 | e->flags, e->ext); |
1464 | |
1465 | devid = e->devid; |
set_dev_entry_from_acpi(iommu, devid, e->flags,
e->ext);
1468 | break; |
1469 | case IVHD_DEV_EXT_SELECT_RANGE: |
1470 | |
1471 | DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " |
1472 | "%04x:%02x:%02x.%x flags: %02x ext: %08x\n" , |
1473 | seg_id, PCI_BUS_NUM(e->devid), |
1474 | PCI_SLOT(e->devid), |
1475 | PCI_FUNC(e->devid), |
1476 | e->flags, e->ext); |
1477 | |
1478 | devid_start = e->devid; |
1479 | flags = e->flags; |
1480 | ext_flags = e->ext; |
1481 | alias = false; |
1482 | break; |
1483 | case IVHD_DEV_RANGE_END: |
1484 | |
1485 | DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n" , |
1486 | seg_id, PCI_BUS_NUM(e->devid), |
1487 | PCI_SLOT(e->devid), |
1488 | PCI_FUNC(e->devid)); |
1489 | |
1490 | devid = e->devid; |
1491 | for (dev_i = devid_start; dev_i <= devid; ++dev_i) { |
1492 | if (alias) { |
1493 | pci_seg->alias_table[dev_i] = devid_to; |
1494 | set_dev_entry_from_acpi(iommu, |
devid_to, flags, ext_flags);
1496 | } |
set_dev_entry_from_acpi(iommu, dev_i,
1498 | flags, ext_flags); |
1499 | } |
1500 | break; |
1501 | case IVHD_DEV_SPECIAL: { |
1502 | u8 handle, type; |
1503 | const char *var; |
1504 | u32 devid; |
1505 | int ret; |
1506 | |
1507 | handle = e->ext & 0xff; |
1508 | devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); |
1509 | type = (e->ext >> 24) & 0xff; |
1510 | |
1511 | if (type == IVHD_SPECIAL_IOAPIC) |
1512 | var = "IOAPIC" ; |
1513 | else if (type == IVHD_SPECIAL_HPET) |
1514 | var = "HPET" ; |
1515 | else |
1516 | var = "UNKNOWN" ; |
1517 | |
1518 | DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n" , |
1519 | var, (int)handle, |
1520 | seg_id, PCI_BUS_NUM(devid), |
1521 | PCI_SLOT(devid), |
1522 | PCI_FUNC(devid)); |
1523 | |
ret = add_special_device(type, handle, &devid, false);
1525 | if (ret) |
1526 | return ret; |
1527 | |
1528 | /* |
1529 | * add_special_device might update the devid in case a |
1530 | * command-line override is present. So call |
1531 | * set_dev_entry_from_acpi after add_special_device. |
1532 | */ |
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1534 | |
1535 | break; |
1536 | } |
1537 | case IVHD_DEV_ACPI_HID: { |
1538 | u32 devid; |
1539 | u8 hid[ACPIHID_HID_LEN]; |
1540 | u8 uid[ACPIHID_UID_LEN]; |
1541 | int ret; |
1542 | |
1543 | if (h->type != 0x40) { |
1544 | pr_err(FW_BUG "Invalid IVHD device type %#x\n" , |
1545 | e->type); |
1546 | break; |
1547 | } |
1548 | |
1549 | BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); |
1550 | memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); |
1551 | hid[ACPIHID_HID_LEN - 1] = '\0'; |
1552 | |
1553 | if (!(*hid)) { |
1554 | pr_err(FW_BUG "Invalid HID.\n" ); |
1555 | break; |
1556 | } |
1557 | |
1558 | uid[0] = '\0'; |
1559 | switch (e->uidf) { |
1560 | case UID_NOT_PRESENT: |
1561 | |
1562 | if (e->uidl != 0) |
1563 | pr_warn(FW_BUG "Invalid UID length.\n" ); |
1564 | |
1565 | break; |
1566 | case UID_IS_INTEGER: |
1567 | |
sprintf(uid, "%d", e->uid);
1569 | |
1570 | break; |
1571 | case UID_IS_CHARACTER: |
1572 | |
1573 | memcpy(uid, &e->uid, e->uidl); |
1574 | uid[e->uidl] = '\0'; |
1575 | |
1576 | break; |
1577 | default: |
1578 | break; |
1579 | } |
1580 | |
1581 | devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); |
1582 | DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n" , |
1583 | hid, uid, seg_id, |
1584 | PCI_BUS_NUM(devid), |
1585 | PCI_SLOT(devid), |
1586 | PCI_FUNC(devid)); |
1587 | |
1588 | flags = e->flags; |
1589 | |
ret = add_acpi_hid_device(hid, uid, &devid, false);
1591 | if (ret) |
1592 | return ret; |
1593 | |
1594 | /* |
* add_acpi_hid_device might update the devid in case a
* command-line override is present. So call
* set_dev_entry_from_acpi after add_acpi_hid_device.
1598 | */ |
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1600 | |
1601 | break; |
1602 | } |
1603 | default: |
1604 | break; |
1605 | } |
1606 | |
p += ivhd_entry_length(p);
1608 | } |
1609 | |
1610 | return 0; |
1611 | } |
1612 | |
1613 | /* Allocate PCI segment data structure */ |
1614 | static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, |
1615 | struct acpi_table_header *ivrs_base) |
1616 | { |
1617 | struct amd_iommu_pci_seg *pci_seg; |
1618 | int last_bdf; |
1619 | |
1620 | /* |
1621 | * First parse ACPI tables to find the largest Bus/Dev/Func we need to |
1622 | * handle in this PCI segment. Upon this information the shared data |
1623 | * structures for the PCI segments in the system will be allocated. |
1624 | */ |
last_bdf = find_last_devid_acpi(ivrs_base, id);
1626 | if (last_bdf < 0) |
1627 | return NULL; |
1628 | |
pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
1630 | if (pci_seg == NULL) |
1631 | return NULL; |
1632 | |
1633 | pci_seg->last_bdf = last_bdf; |
1634 | DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n" , id, last_bdf); |
1635 | pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); |
1636 | pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); |
1637 | pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); |
1638 | |
1639 | pci_seg->id = id; |
init_llist_head(&pci_seg->dev_data_list);
INIT_LIST_HEAD(&pci_seg->unity_map);
list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
1643 | |
1644 | if (alloc_dev_table(pci_seg)) |
1645 | return NULL; |
1646 | if (alloc_alias_table(pci_seg)) |
1647 | return NULL; |
1648 | if (alloc_rlookup_table(pci_seg)) |
1649 | return NULL; |
1650 | |
1651 | return pci_seg; |
1652 | } |
1653 | |
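/* Look up an existing PCI segment structure by id or allocate a new one. */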
1654 | static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, |
1655 | struct acpi_table_header *ivrs_base) |
1656 | { |
1657 | struct amd_iommu_pci_seg *pci_seg; |
1658 | |
1659 | for_each_pci_segment(pci_seg) { |
1660 | if (pci_seg->id == id) |
1661 | return pci_seg; |
1662 | } |
1663 | |
1664 | return alloc_pci_segment(id, ivrs_base); |
1665 | } |
1666 | |
1667 | static void __init free_pci_segments(void) |
1668 | { |
1669 | struct amd_iommu_pci_seg *pci_seg, *next; |
1670 | |
1671 | for_each_pci_segment_safe(pci_seg, next) { |
list_del(&pci_seg->list);
1673 | free_irq_lookup_table(pci_seg); |
1674 | free_rlookup_table(pci_seg); |
1675 | free_alias_table(pci_seg); |
1676 | free_dev_table(pci_seg); |
kfree(pci_seg);
1678 | } |
1679 | } |
1680 | |
1681 | static void __init free_iommu_one(struct amd_iommu *iommu) |
1682 | { |
1683 | free_cwwb_sem(iommu); |
1684 | free_command_buffer(iommu); |
1685 | free_event_buffer(iommu); |
1686 | free_ppr_log(iommu); |
1687 | free_ga_log(iommu); |
1688 | iommu_unmap_mmio_space(iommu); |
1689 | } |
1690 | |
1691 | static void __init free_iommu_all(void) |
1692 | { |
1693 | struct amd_iommu *iommu, *next; |
1694 | |
1695 | for_each_iommu_safe(iommu, next) { |
list_del(&iommu->list);
free_iommu_one(iommu);
kfree(iommu);
1699 | } |
1700 | } |
1701 | |
1702 | /* |
1703 | * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) |
1704 | * Workaround: |
* BIOS should disable L2B miscellaneous clock gating by setting
1706 | * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b |
1707 | */ |
1708 | static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) |
1709 | { |
1710 | u32 value; |
1711 | |
1712 | if ((boot_cpu_data.x86 != 0x15) || |
1713 | (boot_cpu_data.x86_model < 0x10) || |
1714 | (boot_cpu_data.x86_model > 0x1f)) |
1715 | return; |
1716 | |
pci_write_config_dword(iommu->dev, 0xf0, 0x90);
pci_read_config_dword(iommu->dev, 0xf4, &value);
1719 | |
1720 | if (value & BIT(2)) |
1721 | return; |
1722 | |
1723 | /* Select NB indirect register 0x90 and enable writing */ |
1724 | pci_write_config_dword(dev: iommu->dev, where: 0xf0, val: 0x90 | (1 << 8)); |
1725 | |
1726 | pci_write_config_dword(dev: iommu->dev, where: 0xf4, val: value | 0x4); |
1727 | pci_info(iommu->dev, "Applying erratum 746 workaround\n" ); |
1728 | |
1729 | /* Clear the enable writing bit */ |
1730 | pci_write_config_dword(dev: iommu->dev, where: 0xf0, val: 0x90); |
1731 | } |
1732 | |
1733 | /* |
1734 | * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) |
1735 | * Workaround: |
1736 | * BIOS should enable ATS write permission check by setting |
1737 | * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b |
1738 | */ |
1739 | static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) |
1740 | { |
1741 | u32 value; |
1742 | |
1743 | if ((boot_cpu_data.x86 != 0x15) || |
1744 | (boot_cpu_data.x86_model < 0x30) || |
1745 | (boot_cpu_data.x86_model > 0x3f)) |
1746 | return; |
1747 | |
1748 | /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ |
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
1758 | } |
1759 | |
1760 | /* |
1761 | * This function glues the initialization function for one IOMMU |
1762 | * together and also allocates the command buffer and programs the |
1763 | * hardware. It does NOT enable the IOMMU. This is done afterwards. |
1764 | */ |
1765 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, |
1766 | struct acpi_table_header *ivrs_base) |
1767 | { |
1768 | struct amd_iommu_pci_seg *pci_seg; |
1769 | |
	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
1771 | if (pci_seg == NULL) |
1772 | return -ENOMEM; |
1773 | iommu->pci_seg = pci_seg; |
1774 | |
1775 | raw_spin_lock_init(&iommu->lock); |
	atomic64_set(&iommu->cmd_sem_val, 0);
1777 | |
1778 | /* Add IOMMU to internal data structures */ |
	list_add_tail(&iommu->list, &amd_iommu_list);
1780 | iommu->index = amd_iommus_present++; |
1781 | |
1782 | if (unlikely(iommu->index >= MAX_IOMMUS)) { |
1783 | WARN(1, "System has more IOMMUs than supported by this driver\n" ); |
1784 | return -ENOSYS; |
1785 | } |
1786 | |
1787 | /* Index is fine - add IOMMU to the array */ |
1788 | amd_iommus[iommu->index] = iommu; |
1789 | |
1790 | /* |
1791 | * Copy data from ACPI table entry to the iommu struct |
1792 | */ |
1793 | iommu->devid = h->devid; |
1794 | iommu->cap_ptr = h->cap_ptr; |
1795 | iommu->mmio_phys = h->mmio_phys; |
1796 | |
1797 | switch (h->type) { |
1798 | case 0x10: |
1799 | /* Check if IVHD EFR contains proper max banks/counters */ |
1800 | if ((h->efr_attr != 0) && |
1801 | ((h->efr_attr & (0xF << 13)) != 0) && |
1802 | ((h->efr_attr & (0x3F << 17)) != 0)) |
1803 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; |
1804 | else |
1805 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; |
1806 | |
1807 | /* |
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1809 | * GAM also requires GA mode. Therefore, we need to |
1810 | * check cmpxchg16b support before enabling it. |
1811 | */ |
1812 | if (!boot_cpu_has(X86_FEATURE_CX16) || |
1813 | ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) |
1814 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; |
1815 | break; |
1816 | case 0x11: |
1817 | case 0x40: |
1818 | if (h->efr_reg & (1 << 9)) |
1819 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; |
1820 | else |
1821 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; |
1822 | |
1823 | /* |
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
1826 | * check cmpxchg16b support before enabling them. |
1827 | */ |
1828 | if (!boot_cpu_has(X86_FEATURE_CX16) || |
1829 | ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { |
1830 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; |
1831 | break; |
1832 | } |
1833 | |
1834 | if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) |
1835 | amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; |
1836 | |
1837 | early_iommu_features_init(iommu, h); |
1838 | |
1839 | break; |
1840 | default: |
1841 | return -EINVAL; |
1842 | } |
1843 | |
	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
1846 | if (!iommu->mmio_base) |
1847 | return -ENOMEM; |
1848 | |
1849 | return init_iommu_from_acpi(iommu, h); |
1850 | } |
1851 | |
1852 | static int __init init_iommu_one_late(struct amd_iommu *iommu) |
1853 | { |
1854 | int ret; |
1855 | |
1856 | if (alloc_cwwb_sem(iommu)) |
1857 | return -ENOMEM; |
1858 | |
1859 | if (alloc_command_buffer(iommu)) |
1860 | return -ENOMEM; |
1861 | |
1862 | if (alloc_event_buffer(iommu)) |
1863 | return -ENOMEM; |
1864 | |
1865 | iommu->int_enabled = false; |
1866 | |
1867 | init_translation_status(iommu); |
1868 | if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { |
1869 | iommu_disable(iommu); |
1870 | clear_translation_pre_enabled(iommu); |
1871 | pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n" , |
1872 | iommu->index); |
1873 | } |
1874 | if (amd_iommu_pre_enabled) |
1875 | amd_iommu_pre_enabled = translation_pre_enabled(iommu); |
1876 | |
1877 | if (amd_iommu_irq_remap) { |
1878 | ret = amd_iommu_create_irq_domain(iommu); |
1879 | if (ret) |
1880 | return ret; |
1881 | } |
1882 | |
1883 | /* |
1884 | * Make sure IOMMU is not considered to translate itself. The IVRS |
1885 | * table tells us so, but this is a lie! |
1886 | */ |
1887 | iommu->pci_seg->rlookup_table[iommu->devid] = NULL; |
1888 | |
1889 | return 0; |
1890 | } |
1891 | |
1892 | /** |
1893 | * get_highest_supported_ivhd_type - Look up the appropriate IVHD type |
1894 | * @ivrs: Pointer to the IVRS header |
1895 | * |
 * This function searches through all IVHD blocks and returns the most
 * comprehensive IVHD type that the driver supports.
1897 | */ |
1898 | static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) |
1899 | { |
1900 | u8 *base = (u8 *)ivrs; |
1901 | struct ivhd_header *ivhd = (struct ivhd_header *) |
1902 | (base + IVRS_HEADER_LENGTH); |
1903 | u8 last_type = ivhd->type; |
1904 | u16 devid = ivhd->devid; |
1905 | |
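	/*
	 * Walk every IVHD block describing the same IOMMU (matching devid)
	 * and remember the type of the last supported one; firmware may
	 * provide one block per IVHD type, so this ends up being the most
	 * comprehensive type the driver can parse.
	 */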
1906 | while (((u8 *)ivhd - base < ivrs->length) && |
1907 | (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { |
1908 | u8 *p = (u8 *) ivhd; |
1909 | |
1910 | if (ivhd->devid == devid) |
1911 | last_type = ivhd->type; |
1912 | ivhd = (struct ivhd_header *)(p + ivhd->length); |
1913 | } |
1914 | |
1915 | return last_type; |
1916 | } |
1917 | |
1918 | /* |
1919 | * Iterates over all IOMMU entries in the ACPI table, allocates the |
1920 | * IOMMU structure and initializes it with init_iommu_one() |
1921 | */ |
1922 | static int __init init_iommu_all(struct acpi_table_header *table) |
1923 | { |
1924 | u8 *p = (u8 *)table, *end = (u8 *)table; |
1925 | struct ivhd_header *h; |
1926 | struct amd_iommu *iommu; |
1927 | int ret; |
1928 | |
1929 | end += table->length; |
1930 | p += IVRS_HEADER_LENGTH; |
1931 | |
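	/*
	 * The IVRS table is a fixed header followed by variable-length
	 * IVHD/IVMD blocks; each block's length field is used to step to
	 * the next one.
	 */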
1932 | /* Phase 1: Process all IVHD blocks */ |
1933 | while (p < end) { |
1934 | h = (struct ivhd_header *)p; |
1935 | if (*p == amd_iommu_target_ivhd_type) { |
1936 | |
1937 | DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x " |
1938 | "flags: %01x info %04x\n" , |
1939 | h->pci_seg, PCI_BUS_NUM(h->devid), |
1940 | PCI_SLOT(h->devid), PCI_FUNC(h->devid), |
1941 | h->cap_ptr, h->flags, h->info); |
1942 | DUMP_printk(" mmio-addr: %016llx\n" , |
1943 | h->mmio_phys); |
1944 | |
			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1946 | if (iommu == NULL) |
1947 | return -ENOMEM; |
1948 | |
			ret = init_iommu_one(iommu, h, table);
1950 | if (ret) |
1951 | return ret; |
1952 | } |
1953 | p += h->length; |
1954 | |
1955 | } |
1956 | WARN_ON(p != end); |
1957 | |
1958 | /* Phase 2 : Early feature support check */ |
1959 | get_global_efr(); |
1960 | |
1961 | /* Phase 3 : Enabling IOMMU features */ |
1962 | for_each_iommu(iommu) { |
1963 | ret = init_iommu_one_late(iommu); |
1964 | if (ret) |
1965 | return ret; |
1966 | } |
1967 | |
1968 | return 0; |
1969 | } |
1970 | |
1971 | static void init_iommu_perf_ctr(struct amd_iommu *iommu) |
1972 | { |
1973 | u64 val; |
1974 | struct pci_dev *pdev = iommu->dev; |
1975 | |
1976 | if (!check_feature(FEATURE_PC)) |
1977 | return; |
1978 | |
1979 | amd_iommu_pc_present = true; |
1980 | |
1981 | pci_info(pdev, "IOMMU performance counters supported\n" ); |
1982 | |
	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1984 | iommu->max_banks = (u8) ((val >> 12) & 0x3f); |
1985 | iommu->max_counters = (u8) ((val >> 7) & 0xf); |
1986 | |
1987 | return; |
1988 | } |
1989 | |
1990 | static ssize_t amd_iommu_show_cap(struct device *dev, |
1991 | struct device_attribute *attr, |
1992 | char *buf) |
1993 | { |
1994 | struct amd_iommu *iommu = dev_to_amd_iommu(dev); |
	return sysfs_emit(buf, "%x\n", iommu->cap);
1996 | } |
1997 | static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); |
1998 | |
1999 | static ssize_t amd_iommu_show_features(struct device *dev, |
2000 | struct device_attribute *attr, |
2001 | char *buf) |
2002 | { |
	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
2004 | } |
2005 | static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); |
2006 | |
2007 | static struct attribute *amd_iommu_attrs[] = { |
2008 | &dev_attr_cap.attr, |
2009 | &dev_attr_features.attr, |
2010 | NULL, |
2011 | }; |
2012 | |
2013 | static struct attribute_group amd_iommu_group = { |
2014 | .name = "amd-iommu" , |
2015 | .attrs = amd_iommu_attrs, |
2016 | }; |
2017 | |
2018 | static const struct attribute_group *amd_iommu_groups[] = { |
2019 | &amd_iommu_group, |
2020 | NULL, |
2021 | }; |
2022 | |
2023 | /* |
 * Note: IVHD 0x11 and 0x40 also contain an exact copy
2025 | * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. |
2026 | * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). |
2027 | */ |
2028 | static void __init late_iommu_features_init(struct amd_iommu *iommu) |
2029 | { |
2030 | u64 features, features2; |
2031 | |
2032 | if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) |
2033 | return; |
2034 | |
2035 | /* read extended feature bits */ |
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
2038 | |
2039 | if (!amd_iommu_efr) { |
2040 | amd_iommu_efr = features; |
2041 | amd_iommu_efr2 = features2; |
2042 | return; |
2043 | } |
2044 | |
2045 | /* |
2046 | * Sanity check and warn if EFR values from |
2047 | * IVHD and MMIO conflict. |
2048 | */ |
2049 | if (features != amd_iommu_efr || |
2050 | features2 != amd_iommu_efr2) { |
2051 | pr_warn(FW_WARN |
2052 | "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n" , |
2053 | features, amd_iommu_efr, |
2054 | features2, amd_iommu_efr2); |
2055 | } |
2056 | } |
2057 | |
2058 | static int __init iommu_init_pci(struct amd_iommu *iommu) |
2059 | { |
2060 | int cap_ptr = iommu->cap_ptr; |
2061 | int ret; |
2062 | |
	iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
						 PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
2066 | if (!iommu->dev) |
2067 | return -ENODEV; |
2068 | |
2069 | /* Prevent binding other PCI device drivers to IOMMU devices */ |
2070 | iommu->dev->match_driver = false; |
2071 | |
2072 | /* ACPI _PRT won't have an IRQ for IOMMU */ |
2073 | iommu->dev->irq_managed = 1; |
2074 | |
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
2077 | |
2078 | if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) |
2079 | amd_iommu_iotlb_sup = false; |
2080 | |
2081 | late_iommu_features_init(iommu); |
2082 | |
2083 | if (check_feature(FEATURE_GT)) { |
2084 | int glxval; |
2085 | u64 pasmax; |
2086 | |
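		/*
		 * Per the EFR layout, PASmax encodes the supported PASID
		 * width minus one and GLXVal the number of guest CR3 table
		 * levels; the GLX value is clamped to the smallest value
		 * seen across all IOMMUs.
		 */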
2087 | pasmax = amd_iommu_efr & FEATURE_PASID_MASK; |
2088 | pasmax >>= FEATURE_PASID_SHIFT; |
2089 | iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; |
2090 | |
2091 | BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); |
2092 | |
2093 | glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK; |
2094 | glxval >>= FEATURE_GLXVAL_SHIFT; |
2095 | |
2096 | if (amd_iommu_max_glx_val == -1) |
2097 | amd_iommu_max_glx_val = glxval; |
2098 | else |
2099 | amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); |
2100 | } |
2101 | |
2102 | if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu)) |
2103 | return -ENOMEM; |
2104 | |
2105 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { |
2106 | pr_info("Using strict mode due to virtualization\n" ); |
2107 | iommu_set_dma_strict(); |
2108 | amd_iommu_np_cache = true; |
2109 | } |
2110 | |
2111 | init_iommu_perf_ctr(iommu); |
2112 | |
2113 | if (amd_iommu_pgtable == AMD_IOMMU_V2) { |
2114 | if (!check_feature(FEATURE_GIOSUP) || |
2115 | !check_feature(FEATURE_GT)) { |
2116 | pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n" ); |
2117 | amd_iommu_pgtable = AMD_IOMMU_V1; |
2118 | } |
2119 | } |
2120 | |
	if (is_rd890_iommu(iommu->dev)) {
2122 | int i, j; |
2123 | |
		iommu->root_pdev =
			pci_get_domain_bus_and_slot(iommu->pci_seg->id,
						    iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));
2128 | |
2129 | /* |
2130 | * Some rd890 systems may not be fully reconfigured by the |
2131 | * BIOS, so it's necessary for us to store this information so |
2132 | * it can be reprogrammed on resume |
2133 | */ |
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);
2138 | |
2139 | /* Low bit locks writes to configuration space */ |
2140 | iommu->stored_addr_lo &= ~1; |
2141 | |
2142 | for (i = 0; i < 6; i++) |
2143 | for (j = 0; j < 0x12; j++) |
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2145 | |
2146 | for (i = 0; i < 0x83; i++) |
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2148 | } |
2149 | |
2150 | amd_iommu_erratum_746_workaround(iommu); |
2151 | amd_iommu_ats_write_check_workaround(iommu); |
2152 | |
	ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
				     amd_iommu_groups, "ivhd%d", iommu->index);
2155 | if (ret) |
2156 | return ret; |
2157 | |
	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2159 | |
	return pci_enable_device(iommu->dev);
2161 | } |
2162 | |
2163 | static void print_iommu_info(void) |
2164 | { |
2165 | int i; |
2166 | static const char * const feat_str[] = { |
2167 | "PreF" , "PPR" , "X2APIC" , "NX" , "GT" , "[5]" , |
2168 | "IA" , "GA" , "HE" , "PC" |
2169 | }; |
2170 | |
2171 | if (amd_iommu_efr) { |
2172 | pr_info("Extended features (%#llx, %#llx):" , amd_iommu_efr, amd_iommu_efr2); |
2173 | |
2174 | for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { |
			if (check_feature(1ULL << i))
2176 | pr_cont(" %s" , feat_str[i]); |
2177 | } |
2178 | |
2179 | if (check_feature(FEATURE_GAM_VAPIC)) |
2180 | pr_cont(" GA_vAPIC" ); |
2181 | |
2182 | if (check_feature(FEATURE_SNP)) |
2183 | pr_cont(" SNP" ); |
2184 | |
2185 | pr_cont("\n" ); |
2186 | } |
2187 | |
2188 | if (irq_remapping_enabled) { |
2189 | pr_info("Interrupt remapping enabled\n" ); |
2190 | if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) |
2191 | pr_info("X2APIC enabled\n" ); |
2192 | } |
2193 | if (amd_iommu_pgtable == AMD_IOMMU_V2) { |
2194 | pr_info("V2 page table enabled (Paging mode : %d level)\n" , |
2195 | amd_iommu_gpt_level); |
2196 | } |
2197 | } |
2198 | |
2199 | static int __init amd_iommu_init_pci(void) |
2200 | { |
2201 | struct amd_iommu *iommu; |
2202 | struct amd_iommu_pci_seg *pci_seg; |
2203 | int ret; |
2204 | |
2205 | for_each_iommu(iommu) { |
2206 | ret = iommu_init_pci(iommu); |
2207 | if (ret) { |
2208 | pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n" , |
2209 | iommu->index, ret); |
2210 | goto out; |
2211 | } |
2212 | /* Need to setup range after PCI init */ |
2213 | iommu_set_cwwb_range(iommu); |
2214 | } |
2215 | |
2216 | /* |
2217 | * Order is important here to make sure any unity map requirements are |
2218 | * fulfilled. The unity mappings are created and written to the device |
2219 | * table during the iommu_init_pci() call. |
2220 | * |
2221 | * After that we call init_device_table_dma() to make sure any |
2222 | * uninitialized DTE will block DMA, and in the end we flush the caches |
2223 | * of all IOMMUs to make sure the changes to the device table are |
2224 | * active. |
2225 | */ |
2226 | for_each_pci_segment(pci_seg) |
2227 | init_device_table_dma(pci_seg); |
2228 | |
2229 | for_each_iommu(iommu) |
2230 | amd_iommu_flush_all_caches(iommu); |
2231 | |
2232 | print_iommu_info(); |
2233 | |
2234 | out: |
2235 | return ret; |
2236 | } |
2237 | |
2238 | /**************************************************************************** |
2239 | * |
2240 | * The following functions initialize the MSI interrupts for all IOMMUs |
2241 | * in the system. It's a bit challenging because there could be multiple |
2242 | * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per |
2243 | * pci_dev. |
2244 | * |
2245 | ****************************************************************************/ |
2246 | |
2247 | static int iommu_setup_msi(struct amd_iommu *iommu) |
2248 | { |
2249 | int r; |
2250 | |
	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
2263 | return r; |
2264 | } |
2265 | |
2266 | return 0; |
2267 | } |
2268 | |
2269 | union intcapxt { |
2270 | u64 capxt; |
2271 | struct { |
2272 | u64 reserved_0 : 2, |
2273 | dest_mode_logical : 1, |
2274 | reserved_1 : 5, |
2275 | destid_0_23 : 24, |
2276 | vector : 8, |
2277 | reserved_2 : 16, |
2278 | destid_24_31 : 8; |
2279 | }; |
2280 | } __attribute__ ((packed)); |
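
/*
 * This mirrors the layout of the XT interrupt control registers (event,
 * PPR and GA log) programmed below: the 32-bit destination APIC ID is
 * split across destid_0_23 and destid_24_31, e.g. APIC ID 0x12345678 is
 * written as destid_0_23 = 0x345678 and destid_24_31 = 0x12.
 */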
2281 | |
2282 | |
2283 | static struct irq_chip intcapxt_controller; |
2284 | |
2285 | static int intcapxt_irqdomain_activate(struct irq_domain *domain, |
2286 | struct irq_data *irqd, bool reserve) |
2287 | { |
2288 | return 0; |
2289 | } |
2290 | |
2291 | static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, |
2292 | struct irq_data *irqd) |
2293 | { |
2294 | } |
2295 | |
2296 | |
2297 | static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, |
2298 | unsigned int nr_irqs, void *arg) |
2299 | { |
2300 | struct irq_alloc_info *info = arg; |
2301 | int i, ret; |
2302 | |
2303 | if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) |
2304 | return -EINVAL; |
2305 | |
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2307 | if (ret < 0) |
2308 | return ret; |
2309 | |
2310 | for (i = virq; i < virq + nr_irqs; i++) { |
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2312 | |
2313 | irqd->chip = &intcapxt_controller; |
2314 | irqd->hwirq = info->hwirq; |
2315 | irqd->chip_data = info->data; |
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
2317 | } |
2318 | |
2319 | return ret; |
2320 | } |
2321 | |
2322 | static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq, |
2323 | unsigned int nr_irqs) |
2324 | { |
2325 | irq_domain_free_irqs_top(domain, virq, nr_irqs); |
2326 | } |
2327 | |
2328 | |
2329 | static void intcapxt_unmask_irq(struct irq_data *irqd) |
2330 | { |
2331 | struct amd_iommu *iommu = irqd->chip_data; |
	struct irq_cfg *cfg = irqd_cfg(irqd);
2333 | union intcapxt xt; |
2334 | |
2335 | xt.capxt = 0ULL; |
2336 | xt.dest_mode_logical = apic->dest_mode_logical; |
2337 | xt.vector = cfg->vector; |
2338 | xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); |
2339 | xt.destid_24_31 = cfg->dest_apicid >> 24; |
2340 | |
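	/*
	 * irqd->hwirq holds the MMIO offset of the XT register to program
	 * (MMIO_INTCAPXT_{EVT,PPR,GALOG}_OFFSET), as passed in by
	 * __iommu_setup_intcapxt().
	 */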
	writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
2342 | } |
2343 | |
2344 | static void intcapxt_mask_irq(struct irq_data *irqd) |
2345 | { |
2346 | struct amd_iommu *iommu = irqd->chip_data; |
2347 | |
	writeq(0, iommu->mmio_base + irqd->hwirq);
2349 | } |
2350 | |
2351 | |
2352 | static int intcapxt_set_affinity(struct irq_data *irqd, |
2353 | const struct cpumask *mask, bool force) |
2354 | { |
2355 | struct irq_data *parent = irqd->parent_data; |
2356 | int ret; |
2357 | |
2358 | ret = parent->chip->irq_set_affinity(parent, mask, force); |
2359 | if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) |
2360 | return ret; |
2361 | return 0; |
2362 | } |
2363 | |
2364 | static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on) |
2365 | { |
2366 | return on ? -EOPNOTSUPP : 0; |
2367 | } |
2368 | |
2369 | static struct irq_chip intcapxt_controller = { |
2370 | .name = "IOMMU-MSI" , |
2371 | .irq_unmask = intcapxt_unmask_irq, |
2372 | .irq_mask = intcapxt_mask_irq, |
2373 | .irq_ack = irq_chip_ack_parent, |
2374 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
2375 | .irq_set_affinity = intcapxt_set_affinity, |
2376 | .irq_set_wake = intcapxt_set_wake, |
2377 | .flags = IRQCHIP_MASK_ON_SUSPEND, |
2378 | }; |
2379 | |
2380 | static const struct irq_domain_ops intcapxt_domain_ops = { |
2381 | .alloc = intcapxt_irqdomain_alloc, |
2382 | .free = intcapxt_irqdomain_free, |
2383 | .activate = intcapxt_irqdomain_activate, |
2384 | .deactivate = intcapxt_irqdomain_deactivate, |
2385 | }; |
2386 | |
2387 | |
2388 | static struct irq_domain *iommu_irqdomain; |
2389 | |
2390 | static struct irq_domain *iommu_get_irqdomain(void) |
2391 | { |
2392 | struct fwnode_handle *fn; |
2393 | |
2394 | /* No need for locking here (yet) as the init is single-threaded */ |
2395 | if (iommu_irqdomain) |
2396 | return iommu_irqdomain; |
2397 | |
	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);
2407 | |
2408 | return iommu_irqdomain; |
2409 | } |
2410 | |
2411 | static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, |
2412 | int hwirq, irq_handler_t thread_fn) |
2413 | { |
2414 | struct irq_domain *domain; |
2415 | struct irq_alloc_info info; |
2416 | int irq, ret; |
	int node = dev_to_node(&iommu->dev->dev);
2418 | |
2419 | domain = iommu_get_irqdomain(); |
2420 | if (!domain) |
2421 | return -ENXIO; |
2422 | |
	init_irq_alloc_info(&info, NULL);
2424 | info.type = X86_IRQ_ALLOC_TYPE_AMDVI; |
2425 | info.data = iommu; |
2426 | info.hwirq = hwirq; |
2427 | |
	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   thread_fn, 0, devname, iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
2439 | return ret; |
2440 | } |
2441 | |
2442 | return 0; |
2443 | } |
2444 | |
2445 | static int iommu_setup_intcapxt(struct amd_iommu *iommu) |
2446 | { |
2447 | int ret; |
2448 | |
	snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
		 "AMD-Vi%d-Evt", iommu->index);
	ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
				     MMIO_INTCAPXT_EVT_OFFSET,
				     amd_iommu_int_thread_evtlog);
2454 | if (ret) |
2455 | return ret; |
2456 | |
	snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
		 "AMD-Vi%d-PPR", iommu->index);
	ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
				     MMIO_INTCAPXT_PPR_OFFSET,
				     amd_iommu_int_thread_pprlog);
2462 | if (ret) |
2463 | return ret; |
2464 | |
2465 | #ifdef CONFIG_IRQ_REMAP |
	snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
		 "AMD-Vi%d-GA", iommu->index);
	ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
				     MMIO_INTCAPXT_GALOG_OFFSET,
				     amd_iommu_int_thread_galog);
2471 | #endif |
2472 | |
2473 | return ret; |
2474 | } |
2475 | |
2476 | static int iommu_init_irq(struct amd_iommu *iommu) |
2477 | { |
2478 | int ret; |
2479 | |
2480 | if (iommu->int_enabled) |
2481 | goto enable_faults; |
2482 | |
2483 | if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) |
2484 | ret = iommu_setup_intcapxt(iommu); |
2485 | else if (iommu->dev->msi_cap) |
2486 | ret = iommu_setup_msi(iommu); |
2487 | else |
2488 | ret = -ENODEV; |
2489 | |
2490 | if (ret) |
2491 | return ret; |
2492 | |
2493 | iommu->int_enabled = true; |
2494 | enable_faults: |
2495 | |
2496 | if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) |
2497 | iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); |
2498 | |
2499 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); |
2500 | |
2501 | return 0; |
2502 | } |
2503 | |
2504 | /**************************************************************************** |
2505 | * |
2506 | * The next functions belong to the third pass of parsing the ACPI |
2507 | * table. In this last pass the memory mapping requirements are |
2508 | * gathered (like exclusion and unity mapping ranges). |
2509 | * |
2510 | ****************************************************************************/ |
2511 | |
2512 | static void __init free_unity_maps(void) |
2513 | { |
2514 | struct unity_map_entry *entry, *next; |
2515 | struct amd_iommu_pci_seg *p, *pci_seg; |
2516 | |
2517 | for_each_pci_segment_safe(pci_seg, p) { |
2518 | list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { |
			list_del(&entry->list);
			kfree(entry);
2521 | } |
2522 | } |
2523 | } |
2524 | |
2525 | /* called for unity map ACPI definition */ |
2526 | static int __init init_unity_map_range(struct ivmd_header *m, |
2527 | struct acpi_table_header *ivrs_base) |
2528 | { |
2529 | struct unity_map_entry *e = NULL; |
2530 | struct amd_iommu_pci_seg *pci_seg; |
2531 | char *s; |
2532 | |
	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2534 | if (pci_seg == NULL) |
2535 | return -ENOMEM; |
2536 | |
	e = kzalloc(sizeof(*e), GFP_KERNEL);
2538 | if (e == NULL) |
2539 | return -ENOMEM; |
2540 | |
2541 | switch (m->type) { |
2542 | default: |
		kfree(e);
2544 | return 0; |
2545 | case ACPI_IVMD_TYPE: |
		s = "IVMD_TYPE\t\t\t";
2547 | e->devid_start = e->devid_end = m->devid; |
2548 | break; |
2549 | case ACPI_IVMD_TYPE_ALL: |
2550 | s = "IVMD_TYPE_ALL\t\t" ; |
2551 | e->devid_start = 0; |
2552 | e->devid_end = pci_seg->last_bdf; |
2553 | break; |
2554 | case ACPI_IVMD_TYPE_RANGE: |
2555 | s = "IVMD_TYPE_RANGE\t\t" ; |
2556 | e->devid_start = m->devid; |
2557 | e->devid_end = m->aux; |
2558 | break; |
2559 | } |
2560 | e->address_start = PAGE_ALIGN(m->range_start); |
2561 | e->address_end = e->address_start + PAGE_ALIGN(m->range_length); |
2562 | e->prot = m->flags >> 1; |
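	/*
	 * The shift above turns IVMD_FLAG_IR/IVMD_FLAG_IW (bits 1 and 2)
	 * into the IOMMU_PROT_IR/IOMMU_PROT_IW encoding used by the
	 * unity-map code.
	 */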
2563 | |
2564 | /* |
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes might overwrite the exclusion range
	 * (exclusion_start and exclusion_length members) when there are
	 * multiple exclusion ranges (IVMD entries) defined in the ACPI
	 * table.
2570 | */ |
2571 | if (m->flags & IVMD_FLAG_EXCL_RANGE) |
2572 | e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; |
2573 | |
2574 | DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " |
2575 | "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" |
2576 | " flags: %x\n" , s, m->pci_seg, |
2577 | PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), |
2578 | PCI_FUNC(e->devid_start), m->pci_seg, |
2579 | PCI_BUS_NUM(e->devid_end), |
2580 | PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), |
2581 | e->address_start, e->address_end, m->flags); |
2582 | |
	list_add_tail(&e->list, &pci_seg->unity_map);
2584 | |
2585 | return 0; |
2586 | } |
2587 | |
2588 | /* iterates over all memory definitions we find in the ACPI table */ |
2589 | static int __init init_memory_definitions(struct acpi_table_header *table) |
2590 | { |
2591 | u8 *p = (u8 *)table, *end = (u8 *)table; |
2592 | struct ivmd_header *m; |
2593 | |
2594 | end += table->length; |
2595 | p += IVRS_HEADER_LENGTH; |
2596 | |
2597 | while (p < end) { |
2598 | m = (struct ivmd_header *)p; |
2599 | if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) |
			init_unity_map_range(m, table);
2601 | |
2602 | p += m->length; |
2603 | } |
2604 | |
2605 | return 0; |
2606 | } |
2607 | |
2608 | /* |
2609 | * Init the device table to not allow DMA access for devices |
2610 | */ |
2611 | static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) |
2612 | { |
2613 | u32 devid; |
2614 | struct dev_table_entry *dev_table = pci_seg->dev_table; |
2615 | |
2616 | if (dev_table == NULL) |
2617 | return; |
2618 | |
2619 | for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { |
2620 | __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID); |
2621 | if (!amd_iommu_snp_en) |
2622 | __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION); |
2623 | } |
2624 | } |
2625 | |
2626 | static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) |
2627 | { |
2628 | u32 devid; |
2629 | struct dev_table_entry *dev_table = pci_seg->dev_table; |
2630 | |
2631 | if (dev_table == NULL) |
2632 | return; |
2633 | |
2634 | for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { |
2635 | dev_table[devid].data[0] = 0ULL; |
2636 | dev_table[devid].data[1] = 0ULL; |
2637 | } |
2638 | } |
2639 | |
2640 | static void init_device_table(void) |
2641 | { |
2642 | struct amd_iommu_pci_seg *pci_seg; |
2643 | u32 devid; |
2644 | |
2645 | if (!amd_iommu_irq_remap) |
2646 | return; |
2647 | |
2648 | for_each_pci_segment(pci_seg) { |
2649 | for (devid = 0; devid <= pci_seg->last_bdf; ++devid) |
			__set_dev_entry_bit(pci_seg->dev_table,
					    devid, DEV_ENTRY_IRQ_TBL_EN);
2652 | } |
2653 | } |
2654 | |
2655 | static void iommu_init_flags(struct amd_iommu *iommu) |
2656 | { |
2657 | iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? |
2658 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : |
2659 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); |
2660 | |
2661 | iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? |
2662 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : |
2663 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); |
2664 | |
2665 | iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? |
2666 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : |
2667 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); |
2668 | |
2669 | iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? |
2670 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : |
2671 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); |
2672 | |
2673 | /* |
2674 | * make IOMMU memory accesses cache coherent |
2675 | */ |
2676 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); |
2677 | |
2678 | /* Set IOTLB invalidation timeout to 1s */ |
2679 | iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); |
2680 | } |
2681 | |
2682 | static void iommu_apply_resume_quirks(struct amd_iommu *iommu) |
2683 | { |
2684 | int i, j; |
2685 | u32 ioc_feature_control; |
2686 | struct pci_dev *pdev = iommu->root_pdev; |
2687 | |
2688 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
	if (!is_rd890_iommu(iommu->dev) || !pdev)
2690 | return; |
2691 | |
2692 | /* |
2693 | * First, we need to ensure that the iommu is enabled. This is |
2694 | * controlled by a register in the northbridge |
2695 | */ |
2696 | |
2697 | /* Select Northbridge indirect register 0x75 and enable writing */ |
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2700 | |
2701 | /* Enable the iommu */ |
2702 | if (!(ioc_feature_control & 0x1)) |
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2704 | |
2705 | /* Restore the iommu BAR */ |
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);
2710 | |
2711 | /* Restore the l1 indirect regs for each of the 6 l1s */ |
2712 | for (i = 0; i < 6; i++) |
2713 | for (j = 0; j < 0x12; j++) |
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2715 | |
2716 | /* Restore the l2 indirect regs */ |
2717 | for (i = 0; i < 0x83; i++) |
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2719 | |
2720 | /* Lock PCI setup registers */ |
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
2723 | } |
2724 | |
2725 | static void iommu_enable_ga(struct amd_iommu *iommu) |
2726 | { |
2727 | #ifdef CONFIG_IRQ_REMAP |
2728 | switch (amd_iommu_guest_ir) { |
2729 | case AMD_IOMMU_GUEST_IR_VAPIC: |
2730 | case AMD_IOMMU_GUEST_IR_LEGACY_GA: |
2731 | iommu_feature_enable(iommu, CONTROL_GA_EN); |
2732 | iommu->irte_ops = &irte_128_ops; |
2733 | break; |
2734 | default: |
2735 | iommu->irte_ops = &irte_32_ops; |
2736 | break; |
2737 | } |
2738 | #endif |
2739 | } |
2740 | |
2741 | static void iommu_disable_irtcachedis(struct amd_iommu *iommu) |
2742 | { |
2743 | iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); |
2744 | } |
2745 | |
2746 | static void iommu_enable_irtcachedis(struct amd_iommu *iommu) |
2747 | { |
2748 | u64 ctrl; |
2749 | |
2750 | if (!amd_iommu_irtcachedis) |
2751 | return; |
2752 | |
	/*
	 * Note:
	 * Support for the IRTCacheDis feature is determined by checking
	 * whether the bit is writable.
	 */
2758 | iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS); |
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2760 | ctrl &= (1ULL << CONTROL_IRTCACHEDIS); |
2761 | if (ctrl) |
2762 | iommu->irtcachedis_enabled = true; |
2763 | pr_info("iommu%d (%#06x) : IRT cache is %s\n" , |
2764 | iommu->index, iommu->devid, |
2765 | iommu->irtcachedis_enabled ? "disabled" : "enabled" ); |
2766 | } |
2767 | |
2768 | static void early_enable_iommu(struct amd_iommu *iommu) |
2769 | { |
2770 | iommu_disable(iommu); |
2771 | iommu_init_flags(iommu); |
2772 | iommu_set_device_table(iommu); |
2773 | iommu_enable_command_buffer(iommu); |
2774 | iommu_enable_event_buffer(iommu); |
2775 | iommu_set_exclusion_range(iommu); |
2776 | iommu_enable_gt(iommu); |
2777 | iommu_enable_ga(iommu); |
2778 | iommu_enable_xt(iommu); |
2779 | iommu_enable_irtcachedis(iommu); |
2780 | iommu_enable(iommu); |
2781 | amd_iommu_flush_all_caches(iommu); |
2782 | } |
2783 | |
2784 | /* |
2785 | * This function finally enables all IOMMUs found in the system after |
2786 | * they have been initialized. |
2787 | * |
 * Or, if in a kdump kernel with all IOMMUs pre-enabled, try to copy the old
 * content of the device table entries. If that is not the case, or the copy
 * failed, just continue as a normal kernel would.
2791 | */ |
2792 | static void early_enable_iommus(void) |
2793 | { |
2794 | struct amd_iommu *iommu; |
2795 | struct amd_iommu_pci_seg *pci_seg; |
2796 | |
2797 | if (!copy_device_table()) { |
2798 | /* |
		 * If we get here because copying the device table from the
		 * old kernel failed while all IOMMUs were pre-enabled, print
		 * an error message and free any allocated old_dev_tbl_cpy.
2802 | */ |
2803 | if (amd_iommu_pre_enabled) |
2804 | pr_err("Failed to copy DEV table from previous kernel.\n" ); |
2805 | |
2806 | for_each_pci_segment(pci_seg) { |
2807 | if (pci_seg->old_dev_tbl_cpy != NULL) { |
				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
					   get_order(pci_seg->dev_table_size));
2810 | pci_seg->old_dev_tbl_cpy = NULL; |
2811 | } |
2812 | } |
2813 | |
2814 | for_each_iommu(iommu) { |
2815 | clear_translation_pre_enabled(iommu); |
2816 | early_enable_iommu(iommu); |
2817 | } |
2818 | } else { |
2819 | pr_info("Copied DEV table from previous kernel.\n" ); |
2820 | |
2821 | for_each_pci_segment(pci_seg) { |
			free_pages((unsigned long)pci_seg->dev_table,
				   get_order(pci_seg->dev_table_size));
2824 | pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; |
2825 | } |
2826 | |
2827 | for_each_iommu(iommu) { |
2828 | iommu_disable_command_buffer(iommu); |
2829 | iommu_disable_event_buffer(iommu); |
2830 | iommu_disable_irtcachedis(iommu); |
2831 | iommu_enable_command_buffer(iommu); |
2832 | iommu_enable_event_buffer(iommu); |
2833 | iommu_enable_gt(iommu); |
2834 | iommu_enable_ga(iommu); |
2835 | iommu_enable_xt(iommu); |
2836 | iommu_enable_irtcachedis(iommu); |
2837 | iommu_set_device_table(iommu); |
2838 | amd_iommu_flush_all_caches(iommu); |
2839 | } |
2840 | } |
2841 | } |
2842 | |
2843 | static void enable_iommus_v2(void) |
2844 | { |
2845 | struct amd_iommu *iommu; |
2846 | |
2847 | for_each_iommu(iommu) |
2848 | iommu_enable_ppr_log(iommu); |
2849 | } |
2850 | |
2851 | static void enable_iommus_vapic(void) |
2852 | { |
2853 | #ifdef CONFIG_IRQ_REMAP |
2854 | u32 status, i; |
2855 | struct amd_iommu *iommu; |
2856 | |
2857 | for_each_iommu(iommu) { |
2858 | /* |
2859 | * Disable GALog if already running. It could have been enabled |
2860 | * in the previous boot before kdump. |
2861 | */ |
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2863 | if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) |
2864 | continue; |
2865 | |
2866 | iommu_feature_disable(iommu, CONTROL_GALOG_EN); |
2867 | iommu_feature_disable(iommu, CONTROL_GAINT_EN); |
2868 | |
2869 | /* |
		 * Wait for the GALOGRun bit to clear before it is safe to
		 * set or modify the GA Log registers.
2872 | */ |
2873 | for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { |
			status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2875 | if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) |
2876 | break; |
2877 | udelay(10); |
2878 | } |
2879 | |
2880 | if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) |
2881 | return; |
2882 | } |
2883 | |
2884 | if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && |
2885 | !check_feature(FEATURE_GAM_VAPIC)) { |
2886 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; |
2887 | return; |
2888 | } |
2889 | |
2890 | if (amd_iommu_snp_en && |
2891 | !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) { |
2892 | pr_warn("Force to disable Virtual APIC due to SNP\n" ); |
2893 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; |
2894 | return; |
2895 | } |
2896 | |
2897 | /* Enabling GAM and SNPAVIC support */ |
2898 | for_each_iommu(iommu) { |
2899 | if (iommu_init_ga_log(iommu) || |
2900 | iommu_ga_log_enable(iommu)) |
2901 | return; |
2902 | |
2903 | iommu_feature_enable(iommu, CONTROL_GAM_EN); |
2904 | if (amd_iommu_snp_en) |
2905 | iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); |
2906 | } |
2907 | |
2908 | amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); |
2909 | pr_info("Virtual APIC enabled\n" ); |
2910 | #endif |
2911 | } |
2912 | |
2913 | static void enable_iommus(void) |
2914 | { |
2915 | early_enable_iommus(); |
2916 | } |
2917 | |
2918 | static void disable_iommus(void) |
2919 | { |
2920 | struct amd_iommu *iommu; |
2921 | |
2922 | for_each_iommu(iommu) |
2923 | iommu_disable(iommu); |
2924 | |
2925 | #ifdef CONFIG_IRQ_REMAP |
2926 | if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) |
2927 | amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); |
2928 | #endif |
2929 | } |
2930 | |
2931 | /* |
2932 | * Suspend/Resume support |
2933 | * disable suspend until real resume implemented |
2934 | */ |
2935 | |
2936 | static void amd_iommu_resume(void) |
2937 | { |
2938 | struct amd_iommu *iommu; |
2939 | |
2940 | for_each_iommu(iommu) |
2941 | iommu_apply_resume_quirks(iommu); |
2942 | |
2943 | /* re-load the hardware */ |
2944 | enable_iommus(); |
2945 | |
2946 | amd_iommu_enable_interrupts(); |
2947 | } |
2948 | |
2949 | static int amd_iommu_suspend(void) |
2950 | { |
2951 | /* disable IOMMUs to go out of the way for BIOS */ |
2952 | disable_iommus(); |
2953 | |
2954 | return 0; |
2955 | } |
2956 | |
2957 | static struct syscore_ops amd_iommu_syscore_ops = { |
2958 | .suspend = amd_iommu_suspend, |
2959 | .resume = amd_iommu_resume, |
2960 | }; |
2961 | |
2962 | static void __init free_iommu_resources(void) |
2963 | { |
	kmem_cache_destroy(amd_iommu_irq_cache);
2965 | amd_iommu_irq_cache = NULL; |
2966 | |
2967 | free_iommu_all(); |
2968 | free_pci_segments(); |
2969 | } |
2970 | |
2971 | /* SB IOAPIC is always on this device in AMD systems */ |
2972 | #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) |
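/* i.e. devid 0x00a0: bus 0x00, device 0x14, function 0 */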
2973 | |
2974 | static bool __init check_ioapic_information(void) |
2975 | { |
2976 | const char *fw_bug = FW_BUG; |
2977 | bool ret, has_sb_ioapic; |
2978 | int idx; |
2979 | |
2980 | has_sb_ioapic = false; |
2981 | ret = false; |
2982 | |
2983 | /* |
2984 | * If we have map overrides on the kernel command line the |
2985 | * messages in this function might not describe firmware bugs |
2986 | * anymore - so be careful |
2987 | */ |
2988 | if (cmdline_maps) |
2989 | fw_bug = "" ; |
2990 | |
2991 | for (idx = 0; idx < nr_ioapics; idx++) { |
		int devid, id = mpc_ioapic_id(idx);
2993 | |
2994 | devid = get_ioapic_devid(id); |
2995 | if (devid < 0) { |
2996 | pr_err("%s: IOAPIC[%d] not in IVRS table\n" , |
2997 | fw_bug, id); |
2998 | ret = false; |
2999 | } else if (devid == IOAPIC_SB_DEVID) { |
3000 | has_sb_ioapic = true; |
3001 | ret = true; |
3002 | } |
3003 | } |
3004 | |
3005 | if (!has_sb_ioapic) { |
3006 | /* |
3007 | * We expect the SB IOAPIC to be listed in the IVRS |
3008 | * table. The system timer is connected to the SB IOAPIC |
3009 | * and if we don't have it in the list the system will |
3010 | * panic at boot time. This situation usually happens |
3011 | * when the BIOS is buggy and provides us the wrong |
3012 | * device id for the IOAPIC in the system. |
3013 | */ |
3014 | pr_err("%s: No southbridge IOAPIC found\n" , fw_bug); |
3015 | } |
3016 | |
3017 | if (!ret) |
3018 | pr_err("Disabling interrupt remapping\n" ); |
3019 | |
3020 | return ret; |
3021 | } |
3022 | |
3023 | static void __init free_dma_resources(void) |
3024 | { |
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
3027 | amd_iommu_pd_alloc_bitmap = NULL; |
3028 | |
3029 | free_unity_maps(); |
3030 | } |
3031 | |
3032 | static void __init ivinfo_init(void *ivrs) |
3033 | { |
3034 | amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET)); |
3035 | } |
3036 | |
3037 | /* |
3038 | * This is the hardware init function for AMD IOMMU in the system. |
3039 | * This function is called either from amd_iommu_init or from the interrupt |
3040 | * remapping setup code. |
3041 | * |
3042 | * This function basically parses the ACPI table for AMD IOMMU (IVRS) |
3043 | * four times: |
3044 | * |
3045 | * 1 pass) Discover the most comprehensive IVHD type to use. |
3046 | * |
3047 | * 2 pass) Find the highest PCI device id the driver has to handle. |
 *          Based on this information the sizes of the data structures
 *          that need to be allocated are determined.
3050 | * |
3051 | * 3 pass) Initialize the data structures just allocated with the |
3052 | * information in the ACPI table about available AMD IOMMUs |
3053 | * in the system. It also maps the PCI devices in the |
3054 | * system to specific IOMMUs |
3055 | * |
3056 | * 4 pass) After the basic data structures are allocated and |
3057 | * initialized we update them with information about memory |
3058 | * remapping requirements parsed out of the ACPI table in |
3059 | * this last pass. |
3060 | * |
3061 | * After everything is set up the IOMMUs are enabled and the necessary |
3062 | * hotplug and suspend notifiers are registered. |
3063 | */ |
3064 | static int __init early_amd_iommu_init(void) |
3065 | { |
3066 | struct acpi_table_header *ivrs_base; |
3067 | int remap_cache_sz, ret; |
3068 | acpi_status status; |
3069 | |
3070 | if (!amd_iommu_detected) |
3071 | return -ENODEV; |
3072 | |
	status = acpi_get_table("IVRS", 0, &ivrs_base);
3074 | if (status == AE_NOT_FOUND) |
3075 | return -ENODEV; |
3076 | else if (ACPI_FAILURE(status)) { |
		const char *err = acpi_format_exception(status);
3078 | pr_err("IVRS table error: %s\n" , err); |
3079 | return -EINVAL; |
3080 | } |
3081 | |
3082 | /* |
3083 | * Validate checksum here so we don't need to do it when |
3084 | * we actually parse the table |
3085 | */ |
	ret = check_ivrs_checksum(ivrs_base);
3087 | if (ret) |
3088 | goto out; |
3089 | |
	ivinfo_init(ivrs_base);
3091 | |
	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3093 | DUMP_printk("Using IVHD type %#x\n" , amd_iommu_target_ivhd_type); |
3094 | |
3095 | /* Device table - directly used by all IOMMUs */ |
3096 | ret = -ENOMEM; |
3097 | |
	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
3101 | if (amd_iommu_pd_alloc_bitmap == NULL) |
3102 | goto out; |
3103 | |
3104 | /* |
	 * Never allocate domain 0 because it is used as the non-allocated
	 * and error value placeholder.
3107 | */ |
3108 | __set_bit(0, amd_iommu_pd_alloc_bitmap); |
3109 | |
3110 | /* |
3111 | * now the data structures are allocated and basically initialized |
3112 | * start the real acpi table scan |
3113 | */ |
	ret = init_iommu_all(ivrs_base);
3115 | if (ret) |
3116 | goto out; |
3117 | |
3118 | /* 5 level guest page table */ |
3119 | if (cpu_feature_enabled(X86_FEATURE_LA57) && |
3120 | check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL) |
3121 | amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; |
3122 | |
3123 | /* Disable any previously enabled IOMMUs */ |
3124 | if (!is_kdump_kernel() || amd_iommu_disabled) |
3125 | disable_iommus(); |
3126 | |
3127 | if (amd_iommu_irq_remap) |
3128 | amd_iommu_irq_remap = check_ioapic_information(); |
3129 | |
3130 | if (amd_iommu_irq_remap) { |
3131 | struct amd_iommu_pci_seg *pci_seg; |
3132 | /* |
3133 | * Interrupt remapping enabled, create kmem_cache for the |
3134 | * remapping tables. |
3135 | */ |
3136 | ret = -ENOMEM; |
3137 | if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) |
3138 | remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); |
3139 | else |
3140 | remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); |
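		/*
		 * A remap table holds MAX_IRQS_PER_TABLE entries: 4-byte
		 * IRTEs in legacy mode, 16-byte (128-bit) IRTEs when GA
		 * mode is available.
		 */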
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
3145 | if (!amd_iommu_irq_cache) |
3146 | goto out; |
3147 | |
3148 | for_each_pci_segment(pci_seg) { |
3149 | if (alloc_irq_lookup_table(pci_seg)) |
3150 | goto out; |
3151 | } |
3152 | } |
3153 | |
	ret = init_memory_definitions(ivrs_base);
3155 | if (ret) |
3156 | goto out; |
3157 | |
3158 | /* init the device table */ |
3159 | init_device_table(); |
3160 | |
3161 | out: |
3162 | /* Don't leak any ACPI memory */ |
	acpi_put_table(ivrs_base);
3164 | |
3165 | return ret; |
3166 | } |
3167 | |
3168 | static int amd_iommu_enable_interrupts(void) |
3169 | { |
3170 | struct amd_iommu *iommu; |
3171 | int ret = 0; |
3172 | |
3173 | for_each_iommu(iommu) { |
3174 | ret = iommu_init_irq(iommu); |
3175 | if (ret) |
3176 | goto out; |
3177 | } |
3178 | |
3179 | /* |
3180 | * Interrupt handler is ready to process interrupts. Enable |
3181 | * PPR and GA log interrupt for all IOMMUs. |
3182 | */ |
3183 | enable_iommus_vapic(); |
3184 | enable_iommus_v2(); |
3185 | |
3186 | out: |
3187 | return ret; |
3188 | } |
3189 | |
3190 | static bool __init detect_ivrs(void) |
3191 | { |
3192 | struct acpi_table_header *ivrs_base; |
3193 | acpi_status status; |
3194 | int i; |
3195 | |
	status = acpi_get_table("IVRS", 0, &ivrs_base);
3197 | if (status == AE_NOT_FOUND) |
3198 | return false; |
3199 | else if (ACPI_FAILURE(status)) { |
		const char *err = acpi_format_exception(status);
3201 | pr_err("IVRS table error: %s\n" , err); |
3202 | return false; |
3203 | } |
3204 | |
	acpi_put_table(ivrs_base);
3206 | |
3207 | if (amd_iommu_force_enable) |
3208 | goto out; |
3209 | |
3210 | /* Don't use IOMMU if there is Stoney Ridge graphics */ |
3211 | for (i = 0; i < 32; i++) { |
3212 | u32 pci_id; |
3213 | |
		pci_id = read_pci_config(0, i, 0, 0);
3215 | if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { |
3216 | pr_info("Disable IOMMU on Stoney Ridge\n" ); |
3217 | return false; |
3218 | } |
3219 | } |
3220 | |
3221 | out: |
3222 | /* Make sure ACS will be enabled during PCI probe */ |
3223 | pci_request_acs(); |
3224 | |
3225 | return true; |
3226 | } |
3227 | |
3228 | static void iommu_snp_enable(void) |
3229 | { |
3230 | #ifdef CONFIG_KVM_AMD_SEV |
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3232 | return; |
3233 | /* |
3234 | * The SNP support requires that IOMMU must be enabled, and is |
3235 | * configured with V1 page table (DTE[Mode] = 0 is not supported). |
3236 | */ |
3237 | if (no_iommu || iommu_default_passthrough()) { |
3238 | pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n" ); |
3239 | goto disable_snp; |
3240 | } |
3241 | |
3242 | if (amd_iommu_pgtable != AMD_IOMMU_V1) { |
3243 | pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n" ); |
3244 | goto disable_snp; |
3245 | } |
3246 | |
3247 | amd_iommu_snp_en = check_feature(FEATURE_SNP); |
3248 | if (!amd_iommu_snp_en) { |
3249 | pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n" ); |
3250 | goto disable_snp; |
3251 | } |
3252 | |
3253 | pr_info("IOMMU SNP support enabled.\n" ); |
3254 | return; |
3255 | |
3256 | disable_snp: |
	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3258 | #endif |
3259 | } |
3260 | |
3261 | /**************************************************************************** |
3262 | * |
3263 | * AMD IOMMU Initialization State Machine |
3264 | * |
3265 | ****************************************************************************/ |
3266 | |
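/*
 * Normal progression driven by state_next():
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_INITIALIZED
 * IOMMU_NOT_FOUND, IOMMU_CMDLINE_DISABLED and IOMMU_INIT_ERROR are
 * terminal error states.
 */
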
3267 | static int __init state_next(void) |
3268 | { |
3269 | int ret = 0; |
3270 | |
3271 | switch (init_state) { |
3272 | case IOMMU_START_STATE: |
3273 | if (!detect_ivrs()) { |
3274 | init_state = IOMMU_NOT_FOUND; |
3275 | ret = -ENODEV; |
3276 | } else { |
3277 | init_state = IOMMU_IVRS_DETECTED; |
3278 | } |
3279 | break; |
3280 | case IOMMU_IVRS_DETECTED: |
3281 | if (amd_iommu_disabled) { |
3282 | init_state = IOMMU_CMDLINE_DISABLED; |
3283 | ret = -EINVAL; |
3284 | } else { |
3285 | ret = early_amd_iommu_init(); |
3286 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; |
3287 | } |
3288 | break; |
3289 | case IOMMU_ACPI_FINISHED: |
3290 | early_enable_iommus(); |
3291 | x86_platform.iommu_shutdown = disable_iommus; |
3292 | init_state = IOMMU_ENABLED; |
3293 | break; |
3294 | case IOMMU_ENABLED: |
		register_syscore_ops(&amd_iommu_syscore_ops);
3296 | iommu_snp_enable(); |
3297 | ret = amd_iommu_init_pci(); |
3298 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; |
3299 | break; |
3300 | case IOMMU_PCI_INIT: |
3301 | ret = amd_iommu_enable_interrupts(); |
3302 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; |
3303 | break; |
3304 | case IOMMU_INTERRUPTS_EN: |
3305 | init_state = IOMMU_INITIALIZED; |
3306 | break; |
3307 | case IOMMU_INITIALIZED: |
3308 | /* Nothing to do */ |
3309 | break; |
3310 | case IOMMU_NOT_FOUND: |
3311 | case IOMMU_INIT_ERROR: |
3312 | case IOMMU_CMDLINE_DISABLED: |
3313 | /* Error states => do nothing */ |
3314 | ret = -EINVAL; |
3315 | break; |
3316 | default: |
3317 | /* Unknown state */ |
3318 | BUG(); |
3319 | } |
3320 | |
3321 | if (ret) { |
3322 | free_dma_resources(); |
3323 | if (!irq_remapping_enabled) { |
3324 | disable_iommus(); |
3325 | free_iommu_resources(); |
3326 | } else { |
3327 | struct amd_iommu *iommu; |
3328 | struct amd_iommu_pci_seg *pci_seg; |
3329 | |
3330 | for_each_pci_segment(pci_seg) |
3331 | uninit_device_table_dma(pci_seg); |
3332 | |
3333 | for_each_iommu(iommu) |
3334 | amd_iommu_flush_all_caches(iommu); |
3335 | } |
3336 | } |
3337 | return ret; |
3338 | } |
3339 | |
3340 | static int __init iommu_go_to_state(enum iommu_init_state state) |
3341 | { |
3342 | int ret = -EINVAL; |
3343 | |
3344 | while (init_state != state) { |
3345 | if (init_state == IOMMU_NOT_FOUND || |
3346 | init_state == IOMMU_INIT_ERROR || |
3347 | init_state == IOMMU_CMDLINE_DISABLED) |
3348 | break; |
3349 | ret = state_next(); |
3350 | } |
3351 | |
3352 | return ret; |
3353 | } |
3354 | |
3355 | #ifdef CONFIG_IRQ_REMAP |
3356 | int __init amd_iommu_prepare(void) |
3357 | { |
3358 | int ret; |
3359 | |
3360 | amd_iommu_irq_remap = true; |
3361 | |
	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3363 | if (ret) { |
3364 | amd_iommu_irq_remap = false; |
3365 | return ret; |
3366 | } |
3367 | |
3368 | return amd_iommu_irq_remap ? 0 : -ENODEV; |
3369 | } |
3370 | |
3371 | int __init amd_iommu_enable(void) |
3372 | { |
3373 | int ret; |
3374 | |
	ret = iommu_go_to_state(IOMMU_ENABLED);
3376 | if (ret) |
3377 | return ret; |
3378 | |
3379 | irq_remapping_enabled = 1; |
3380 | return amd_iommu_xt_mode; |
3381 | } |
3382 | |
3383 | void amd_iommu_disable(void) |
3384 | { |
3385 | amd_iommu_suspend(); |
3386 | } |
3387 | |
3388 | int amd_iommu_reenable(int mode) |
3389 | { |
3390 | amd_iommu_resume(); |
3391 | |
3392 | return 0; |
3393 | } |
3394 | |
3395 | int __init amd_iommu_enable_faulting(void) |
3396 | { |
3397 | /* We enable MSI later when PCI is initialized */ |
3398 | return 0; |
3399 | } |
3400 | #endif |
3401 | |
3402 | /* |
3403 | * This is the core init function for AMD IOMMU hardware in the system. |
3404 | * This function is called from the generic x86 DMA layer initialization |
3405 | * code. |
3406 | */ |
3407 | static int __init amd_iommu_init(void) |
3408 | { |
3409 | struct amd_iommu *iommu; |
3410 | int ret; |
3411 | |
	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
3415 | /* |
3416 | * We failed to initialize the AMD IOMMU - try fallback |
3417 | * to GART if possible. |
3418 | */ |
3419 | gart_iommu_init(); |
3420 | } |
3421 | #endif |
3422 | |
3423 | for_each_iommu(iommu) |
3424 | amd_iommu_debugfs_setup(iommu); |
3425 | |
3426 | return ret; |
3427 | } |
3428 | |
3429 | static bool amd_iommu_sme_check(void) |
3430 | { |
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3432 | (boot_cpu_data.x86 != 0x17)) |
3433 | return true; |
3434 | |
3435 | /* For Fam17h, a specific level of support is required */ |
3436 | if (boot_cpu_data.microcode >= 0x08001205) |
3437 | return true; |
3438 | |
3439 | if ((boot_cpu_data.microcode >= 0x08001126) && |
3440 | (boot_cpu_data.microcode <= 0x080011ff)) |
3441 | return true; |
3442 | |
3443 | pr_notice("IOMMU not currently supported when SME is active\n" ); |
3444 | |
3445 | return false; |
3446 | } |
3447 | |
3448 | /**************************************************************************** |
3449 | * |
3450 | * Early detect code. This code runs at IOMMU detection time in the DMA |
3451 | * layer. It just looks if there is an IVRS ACPI table to detect AMD |
3452 | * IOMMUs |
3453 | * |
3454 | ****************************************************************************/ |
3455 | int __init amd_iommu_detect(void) |
3456 | { |
3457 | int ret; |
3458 | |
3459 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
3460 | return -ENODEV; |
3461 | |
3462 | if (!amd_iommu_sme_check()) |
3463 | return -ENODEV; |
3464 | |
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3466 | if (ret) |
3467 | return ret; |
3468 | |
3469 | amd_iommu_detected = true; |
3470 | iommu_detected = 1; |
3471 | x86_init.iommu.iommu_init = amd_iommu_init; |
3472 | |
3473 | return 1; |
3474 | } |
3475 | |
3476 | /**************************************************************************** |
3477 | * |
3478 | * Parsing functions for the AMD IOMMU specific kernel command line |
3479 | * options. |
3480 | * |
3481 | ****************************************************************************/ |
3482 | |
3483 | static int __init parse_amd_iommu_dump(char *str) |
3484 | { |
3485 | amd_iommu_dump = true; |
3486 | |
3487 | return 1; |
3488 | } |
3489 | |
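/*
 * Parser for the amd_iommu_intr= early parameter.  Per the code below
 * (illustrative summary): "legacy" selects AMD_IOMMU_GUEST_IR_LEGACY_GA,
 * "vapic" selects AMD_IOMMU_GUEST_IR_VAPIC, e.g. amd_iommu_intr=vapic.
 */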
3490 | static int __init parse_amd_iommu_intr(char *str) |
3491 | { |
3492 | for (; *str; ++str) { |
if (strncmp(str, "legacy", 6) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
break;
}
if (strncmp(str, "vapic", 5) == 0) {
3498 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; |
3499 | break; |
3500 | } |
3501 | } |
3502 | return 1; |
3503 | } |
3504 | |
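/*
 * Parser for the amd_iommu= early parameter.  Options are comma separated;
 * the ones handled below are: fullflush (deprecated, use iommu.strict=1),
 * force_enable, off, force_isolation, pgtbl_v1, pgtbl_v2 and irtcachedis.
 * Example (illustrative only): amd_iommu=force_enable,pgtbl_v2
 */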
3505 | static int __init parse_amd_iommu_options(char *str) |
3506 | { |
3507 | if (!str) |
3508 | return -EINVAL; |
3509 | |
3510 | while (*str) { |
if (strncmp(str, "fullflush", 9) == 0) {
pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict();
} else if (strncmp(str, "force_enable", 12) == 0) {
amd_iommu_force_enable = true;
} else if (strncmp(str, "off", 3) == 0) {
amd_iommu_disabled = true;
} else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
amd_iommu_pgtable = AMD_IOMMU_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
amd_iommu_pgtable = AMD_IOMMU_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
} else {
pr_notice("Unknown option - '%s'\n", str);
}

str += strcspn(str, ",");
3531 | while (*str == ',') |
3532 | str++; |
3533 | } |
3534 | |
3535 | return 1; |
3536 | } |
3537 | |
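/*
 * Parser for ivrs_ioapic overrides.  Formats accepted below (illustrative
 * summary, segment optional):
 *
 *   ivrs_ioapic=<id>@[<seg>:]<bus>:<dev>.<fn>   (current)
 *   ivrs_ioapic[<id>]=[<seg>:]<bus>:<dev>.<fn>  (deprecated)
 *
 * e.g. ivrs_ioapic=10@0000:00:14.0 maps IOAPIC id 10 to that device.
 */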
3538 | static int __init parse_ivrs_ioapic(char *str) |
3539 | { |
3540 | u32 seg = 0, bus, dev, fn; |
3541 | int id, i; |
3542 | u32 devid; |
3543 | |
if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
goto found;

if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
str, id, seg, bus, dev, fn);
3552 | goto found; |
3553 | } |
3554 | |
pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3556 | return 1; |
3557 | |
3558 | found: |
3559 | if (early_ioapic_map_size == EARLY_MAP_SIZE) { |
pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
str);
3562 | return 1; |
3563 | } |
3564 | |
3565 | devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); |
3566 | |
3567 | cmdline_maps = true; |
3568 | i = early_ioapic_map_size++; |
3569 | early_ioapic_map[i].id = id; |
3570 | early_ioapic_map[i].devid = devid; |
3571 | early_ioapic_map[i].cmd_line = true; |
3572 | |
3573 | return 1; |
3574 | } |
3575 | |
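/*
 * Parser for ivrs_hpet overrides, analogous to ivrs_ioapic above
 * (illustrative): ivrs_hpet=<id>@[<seg>:]<bus>:<dev>.<fn> is the current
 * format, ivrs_hpet[<id>]=... is the deprecated one.
 */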
3576 | static int __init parse_ivrs_hpet(char *str) |
3577 | { |
3578 | u32 seg = 0, bus, dev, fn; |
3579 | int id, i; |
3580 | u32 devid; |
3581 | |
if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
goto found;

if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
str, id, seg, bus, dev, fn);
3590 | goto found; |
3591 | } |
3592 | |
pr_err("Invalid command line: ivrs_hpet%s\n", str);
3594 | return 1; |
3595 | |
3596 | found: |
3597 | if (early_hpet_map_size == EARLY_MAP_SIZE) { |
pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
str);
3600 | return 1; |
3601 | } |
3602 | |
3603 | devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); |
3604 | |
3605 | cmdline_maps = true; |
3606 | i = early_hpet_map_size++; |
3607 | early_hpet_map[i].id = id; |
3608 | early_hpet_map[i].devid = devid; |
3609 | early_hpet_map[i].cmd_line = true; |
3610 | |
3611 | return 1; |
3612 | } |
3613 | |
3614 | #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) |
3615 | |
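/*
 * Parser for ivrs_acpihid overrides.  Formats accepted below (illustrative
 * summary):
 *
 *   ivrs_acpihid=<HID>:<UID>@[<seg>:]<bus>:<dev>.<fn>    (current)
 *   ivrs_acpihid[[<seg>:]<bus>:<dev>.<fn>]=<HID>:<UID>   (deprecated)
 *
 * e.g. ivrs_acpihid=AMDI0020:0@0000:00:14.5 (hypothetical HID/UID values).
 */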
3616 | static int __init parse_ivrs_acpihid(char *str) |
3617 | { |
3618 | u32 seg = 0, bus, dev, fn; |
3619 | char *hid, *uid, *p, *addr; |
3620 | char acpiid[ACPIID_LEN] = {0}; |
3621 | int i; |
3622 | |
3623 | addr = strchr(str, '@'); |
3624 | if (!addr) { |
3625 | addr = strchr(str, '='); |
3626 | if (!addr) |
3627 | goto not_found; |
3628 | |
3629 | ++addr; |
3630 | |
3631 | if (strlen(addr) > ACPIID_LEN) |
3632 | goto not_found; |
3633 | |
if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
str, acpiid, seg, bus, dev, fn);
3638 | goto found; |
3639 | } |
3640 | goto not_found; |
3641 | } |
3642 | |
3643 | /* We have the '@', make it the terminator to get just the acpiid */ |
3644 | *addr++ = 0; |
3645 | |
3646 | if (strlen(str) > ACPIID_LEN + 1) |
3647 | goto not_found; |
3648 | |
if (sscanf(str, "=%s", acpiid) != 1)
3650 | goto not_found; |
3651 | |
if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
3654 | goto found; |
3655 | |
3656 | not_found: |
pr_err("Invalid command line: ivrs_acpihid%s\n", str);
3658 | return 1; |
3659 | |
3660 | found: |
3661 | p = acpiid; |
hid = strsep(&p, ":");
3663 | uid = p; |
3664 | |
3665 | if (!hid || !(*hid) || !uid) { |
pr_err("Invalid command line: hid or uid\n");
3667 | return 1; |
3668 | } |
3669 | |
3670 | /* |
3671 | * Ignore leading zeroes after ':', so e.g., AMDI0095:00 |
3672 | * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match |
3673 | */ |
3674 | while (*uid == '0' && *(uid + 1)) |
3675 | uid++; |
3676 | |
3677 | i = early_acpihid_map_size++; |
3678 | memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); |
3679 | memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); |
3680 | early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); |
3681 | early_acpihid_map[i].cmd_line = true; |
3682 | |
3683 | return 1; |
3684 | } |
3685 | |
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
3692 | |
3693 | bool amd_iommu_v2_supported(void) |
3694 | { |
3695 | /* CPU page table size should match IOMMU guest page table size */ |
3696 | if (cpu_feature_enabled(X86_FEATURE_LA57) && |
3697 | amd_iommu_gpt_level != PAGE_MODE_5_LEVEL) |
3698 | return false; |
3699 | |
3700 | /* |
3701 | * Since DTE[Mode]=0 is prohibited on SNP-enabled system |
3702 | * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without |
3703 | * setting up IOMMUv1 page table. |
3704 | */ |
3705 | return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en; |
3706 | } |
3707 | |
3708 | struct amd_iommu *get_amd_iommu(unsigned int idx) |
3709 | { |
3710 | unsigned int i = 0; |
3711 | struct amd_iommu *iommu; |
3712 | |
3713 | for_each_iommu(iommu) |
3714 | if (i++ == idx) |
3715 | return iommu; |
3716 | return NULL; |
3717 | } |
3718 | |
3719 | /**************************************************************************** |
3720 | * |
3721 | * IOMMU EFR Performance Counter support functionality. This code allows |
3722 | * access to the IOMMU PC functionality. |
3723 | * |
3724 | ****************************************************************************/ |
3725 | |
3726 | u8 amd_iommu_pc_get_max_banks(unsigned int idx) |
3727 | { |
3728 | struct amd_iommu *iommu = get_amd_iommu(idx); |
3729 | |
3730 | if (iommu) |
3731 | return iommu->max_banks; |
3732 | |
3733 | return 0; |
3734 | } |
3735 | |
3736 | bool amd_iommu_pc_supported(void) |
3737 | { |
3738 | return amd_iommu_pc_present; |
3739 | } |
3740 | |
3741 | u8 amd_iommu_pc_get_max_counters(unsigned int idx) |
3742 | { |
3743 | struct amd_iommu *iommu = get_amd_iommu(idx); |
3744 | |
3745 | if (iommu) |
3746 | return iommu->max_counters; |
3747 | |
3748 | return 0; |
3749 | } |
3750 | |
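/*
 * Register access helper for the performance counter MMIO space.  The
 * per-counter register offset is composed as (illustrative, mirrors the
 * calculation below):
 *
 *   offset = ((0x40 | bank) << 12) | (cntr << 8) | fxn
 *
 * so bank 0 / counter 0 / function 0 starts at MMIO_CNTR_REG_OFFSET, and
 * the upper bound follows from the IOMMU's max_banks/max_counters.
 */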
3751 | static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, |
3752 | u8 fxn, u64 *value, bool is_write) |
3753 | { |
3754 | u32 offset; |
3755 | u32 max_offset_lim; |
3756 | |
3757 | /* Make sure the IOMMU PC resource is available */ |
3758 | if (!amd_iommu_pc_present) |
3759 | return -ENODEV; |
3760 | |
3761 | /* Check for valid iommu and pc register indexing */ |
3762 | if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) |
3763 | return -ENODEV; |
3764 | |
3765 | offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); |
3766 | |
3767 | /* Limit the offset to the hw defined mmio region aperture */ |
3768 | max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | |
3769 | (iommu->max_counters << 8) | 0x28); |
3770 | if ((offset < MMIO_CNTR_REG_OFFSET) || |
3771 | (offset > max_offset_lim)) |
3772 | return -EINVAL; |
3773 | |
3774 | if (is_write) { |
3775 | u64 val = *value & GENMASK_ULL(47, 0); |
3776 | |
writel((u32)val, iommu->mmio_base + offset);
writel((val >> 32), iommu->mmio_base + offset + 4);
} else {
*value = readl(iommu->mmio_base + offset + 4);
*value <<= 32;
*value |= readl(iommu->mmio_base + offset);
3783 | *value &= GENMASK_ULL(47, 0); |
3784 | } |
3785 | |
3786 | return 0; |
3787 | } |
3788 | |
3789 | int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) |
3790 | { |
3791 | if (!iommu) |
3792 | return -EINVAL; |
3793 | |
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3795 | } |
3796 | |
3797 | int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) |
3798 | { |
3799 | if (!iommu) |
3800 | return -EINVAL; |
3801 | |
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3803 | } |
3804 | |
3805 | #ifdef CONFIG_KVM_AMD_SEV |
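/*
 * Sketch of intent, based on the code below: buffers handed to the IOMMU
 * while SNP was enabled must be flipped back to shared in the RMP before
 * reuse.  Huge RMP entries are first split with psmash() and each 4K page
 * is then transitioned via rmp_make_shared().
 */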
3806 | static int iommu_page_make_shared(void *page) |
3807 | { |
3808 | unsigned long paddr, pfn; |
3809 | |
paddr = iommu_virt_to_phys(page);
3811 | /* Cbit maybe set in the paddr */ |
3812 | pfn = __sme_clr(paddr) >> PAGE_SHIFT; |
3813 | |
3814 | if (!(pfn % PTRS_PER_PMD)) { |
3815 | int ret, level; |
3816 | bool assigned; |
3817 | |
ret = snp_lookup_rmpentry(pfn, &assigned, &level);
if (ret) {
pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
3821 | return ret; |
3822 | } |
3823 | |
3824 | if (!assigned) { |
pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
3826 | return -EINVAL; |
3827 | } |
3828 | |
3829 | if (level > PG_LEVEL_4K) { |
3830 | ret = psmash(pfn); |
3831 | if (!ret) |
3832 | goto done; |
3833 | |
pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
pfn, ret, level);
3836 | return ret; |
3837 | } |
3838 | } |
3839 | |
3840 | done: |
return rmp_make_shared(pfn, PG_LEVEL_4K);
3842 | } |
3843 | |
3844 | static int iommu_make_shared(void *va, size_t size) |
3845 | { |
3846 | void *page; |
3847 | int ret; |
3848 | |
3849 | if (!va) |
3850 | return 0; |
3851 | |
3852 | for (page = va; page < (va + size); page += PAGE_SIZE) { |
3853 | ret = iommu_page_make_shared(page); |
3854 | if (ret) |
3855 | return ret; |
3856 | } |
3857 | |
3858 | return 0; |
3859 | } |
3860 | |
3861 | int amd_iommu_snp_disable(void) |
3862 | { |
3863 | struct amd_iommu *iommu; |
3864 | int ret; |
3865 | |
3866 | if (!amd_iommu_snp_en) |
3867 | return 0; |
3868 | |
3869 | for_each_iommu(iommu) { |
ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
if (ret)
return ret;

ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
if (ret)
return ret;

ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
3879 | if (ret) |
3880 | return ret; |
3881 | } |
3882 | |
3883 | return 0; |
3884 | } |
3885 | EXPORT_SYMBOL_GPL(amd_iommu_snp_disable); |
3886 | #endif |
3887 | |