1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
4 | * Author: Joerg Roedel <jroedel@suse.de> |
5 | * Leo Duran <leo.duran@amd.com> |
6 | */ |
7 | |
8 | #ifndef _ASM_X86_AMD_IOMMU_TYPES_H |
9 | #define _ASM_X86_AMD_IOMMU_TYPES_H |
10 | |
11 | #include <linux/types.h> |
12 | #include <linux/mutex.h> |
13 | #include <linux/msi.h> |
14 | #include <linux/list.h> |
15 | #include <linux/spinlock.h> |
16 | #include <linux/pci.h> |
17 | #include <linux/irqreturn.h> |
18 | #include <linux/io-pgtable.h> |
19 | |
20 | /* |
21 | * Maximum number of IOMMUs supported |
22 | */ |
23 | #define MAX_IOMMUS 32 |
24 | |
25 | /* |
26 | * some size calculation constants |
27 | */ |
28 | #define DEV_TABLE_ENTRY_SIZE 32 |
29 | #define ALIAS_TABLE_ENTRY_SIZE 2 |
30 | #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) |
31 | |
32 | /* Capability offsets used by the driver */ |
33 | #define MMIO_CAP_HDR_OFFSET 0x00 |
34 | #define MMIO_RANGE_OFFSET 0x0c |
35 | #define MMIO_MISC_OFFSET 0x10 |
36 | |
37 | /* Masks, shifts and macros to parse the device range capability */ |
38 | #define MMIO_RANGE_LD_MASK 0xff000000 |
39 | #define MMIO_RANGE_FD_MASK 0x00ff0000 |
40 | #define MMIO_RANGE_BUS_MASK 0x0000ff00 |
41 | #define MMIO_RANGE_LD_SHIFT 24 |
42 | #define MMIO_RANGE_FD_SHIFT 16 |
43 | #define MMIO_RANGE_BUS_SHIFT 8 |
44 | #define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT) |
45 | #define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT) |
46 | #define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT) |
47 | #define MMIO_MSI_NUM(x) ((x) & 0x1f) |
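
/*
 * Illustrative sketch, not part of the driver: decoding the device range
 * capability value read from PCI config space at cap_ptr + MMIO_RANGE_OFFSET.
 * The helper name and the 'range' parameter are hypothetical.
 */
static inline void example_decode_range_cap(u32 range, u8 *bus,
					    u8 *first_dev, u8 *last_dev)
{
	*bus	   = MMIO_GET_BUS(range);	/* bus number covered by this IOMMU */
	*first_dev = MMIO_GET_FD(range);	/* first device id of the range     */
	*last_dev  = MMIO_GET_LD(range);	/* last device id of the range      */
}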
48 | |
49 | /* Flag masks for the AMD IOMMU exclusion range */ |
50 | #define MMIO_EXCL_ENABLE_MASK 0x01ULL |
51 | #define MMIO_EXCL_ALLOW_MASK 0x02ULL |
52 | |
53 | /* Used offsets into the MMIO space */ |
54 | #define MMIO_DEV_TABLE_OFFSET 0x0000 |
55 | #define MMIO_CMD_BUF_OFFSET 0x0008 |
56 | #define MMIO_EVT_BUF_OFFSET 0x0010 |
57 | #define MMIO_CONTROL_OFFSET 0x0018 |
58 | #define MMIO_EXCL_BASE_OFFSET 0x0020 |
59 | #define MMIO_EXCL_LIMIT_OFFSET 0x0028 |
60 | #define MMIO_EXT_FEATURES 0x0030 |
61 | #define MMIO_PPR_LOG_OFFSET 0x0038 |
62 | #define MMIO_GA_LOG_BASE_OFFSET 0x00e0 |
63 | #define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 |
64 | #define MMIO_MSI_ADDR_LO_OFFSET 0x015C |
65 | #define MMIO_MSI_ADDR_HI_OFFSET 0x0160 |
66 | #define MMIO_MSI_DATA_OFFSET 0x0164 |
67 | #define MMIO_INTCAPXT_EVT_OFFSET 0x0170 |
68 | #define MMIO_INTCAPXT_PPR_OFFSET 0x0178 |
69 | #define MMIO_INTCAPXT_GALOG_OFFSET 0x0180 |
70 | #define MMIO_EXT_FEATURES2 0x01A0 |
71 | #define MMIO_CMD_HEAD_OFFSET 0x2000 |
72 | #define MMIO_CMD_TAIL_OFFSET 0x2008 |
73 | #define MMIO_EVT_HEAD_OFFSET 0x2010 |
74 | #define MMIO_EVT_TAIL_OFFSET 0x2018 |
75 | #define MMIO_STATUS_OFFSET 0x2020 |
76 | #define MMIO_PPR_HEAD_OFFSET 0x2030 |
77 | #define MMIO_PPR_TAIL_OFFSET 0x2038 |
78 | #define MMIO_GA_HEAD_OFFSET 0x2040 |
79 | #define MMIO_GA_TAIL_OFFSET 0x2048 |
80 | #define MMIO_CNTR_CONF_OFFSET 0x4000 |
81 | #define MMIO_CNTR_REG_OFFSET 0x40000 |
82 | #define MMIO_REG_END_OFFSET 0x80000 |
83 | |
84 | |
85 | |
86 | /* Extended Feature Bits */ |
87 | #define FEATURE_PREFETCH BIT_ULL(0) |
88 | #define FEATURE_PPR BIT_ULL(1) |
89 | #define FEATURE_X2APIC BIT_ULL(2) |
90 | #define FEATURE_NX BIT_ULL(3) |
91 | #define FEATURE_GT BIT_ULL(4) |
92 | #define FEATURE_IA BIT_ULL(6) |
93 | #define FEATURE_GA BIT_ULL(7) |
94 | #define FEATURE_HE BIT_ULL(8) |
95 | #define FEATURE_PC BIT_ULL(9) |
96 | #define FEATURE_GATS_SHIFT (12) |
97 | #define FEATURE_GATS_MASK (3ULL) |
98 | #define FEATURE_GAM_VAPIC BIT_ULL(21) |
99 | #define FEATURE_GIOSUP BIT_ULL(48) |
100 | #define FEATURE_HASUP BIT_ULL(49) |
101 | #define FEATURE_EPHSUP BIT_ULL(50) |
102 | #define FEATURE_HDSUP BIT_ULL(52) |
103 | #define FEATURE_SNP BIT_ULL(63) |
104 | |
105 | #define FEATURE_PASID_SHIFT 32 |
106 | #define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT) |
107 | |
108 | #define FEATURE_GLXVAL_SHIFT 14 |
109 | #define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) |
110 | |
111 | /* Extended Feature 2 Bits */ |
112 | #define FEATURE_SNPAVICSUP_SHIFT 5 |
113 | #define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT) |
#define FEATURE_SNPAVICSUP_GAM(x) \
	((((x) & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT) == 0x1)
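
/*
 * Illustrative sketch, not part of the driver: extracting the multi-bit EFR
 * fields defined above from a raw Extended Feature Register value. The
 * helper names are hypothetical.
 */
static inline unsigned int example_efr_gats(u64 efr)
{
	/* Guest address translation size (GATS) field */
	return (efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK;
}

static inline unsigned int example_efr_pasmax(u64 efr)
{
	/* Raw PASmax field; the driver derives the supported PASID width from it */
	return (efr & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;
}

static inline unsigned int example_efr_glxval(u64 efr)
{
	/* Maximum GLX value (guest CR3 table levels) supported */
	return (efr & FEATURE_GLXVAL_MASK) >> FEATURE_GLXVAL_SHIFT;
}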
116 | |
117 | /* Note: |
 * The current driver only supports 16-bit PASIDs.
 * Current hardware implements at most 16 PASID bits,
 * even though the spec allows up to 20 bits.
121 | */ |
122 | #define PASID_MASK 0x0000ffff |
123 | |
124 | /* MMIO status bits */ |
125 | #define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0) |
126 | #define MMIO_STATUS_EVT_INT_MASK BIT(1) |
127 | #define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2) |
128 | #define MMIO_STATUS_EVT_RUN_MASK BIT(3) |
129 | #define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5) |
130 | #define MMIO_STATUS_PPR_INT_MASK BIT(6) |
131 | #define MMIO_STATUS_PPR_RUN_MASK BIT(7) |
132 | #define MMIO_STATUS_GALOG_RUN_MASK BIT(8) |
133 | #define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9) |
134 | #define MMIO_STATUS_GALOG_INT_MASK BIT(10) |
135 | |
136 | /* event logging constants */ |
137 | #define EVENT_ENTRY_SIZE 0x10 |
138 | #define EVENT_TYPE_SHIFT 28 |
139 | #define EVENT_TYPE_MASK 0xf |
140 | #define EVENT_TYPE_ILL_DEV 0x1 |
141 | #define EVENT_TYPE_IO_FAULT 0x2 |
142 | #define EVENT_TYPE_DEV_TAB_ERR 0x3 |
143 | #define EVENT_TYPE_PAGE_TAB_ERR 0x4 |
144 | #define EVENT_TYPE_ILL_CMD 0x5 |
145 | #define EVENT_TYPE_CMD_HARD_ERR 0x6 |
146 | #define EVENT_TYPE_IOTLB_INV_TO 0x7 |
147 | #define EVENT_TYPE_INV_DEV_REQ 0x8 |
148 | #define EVENT_TYPE_INV_PPR_REQ 0x9 |
149 | #define EVENT_TYPE_RMP_FAULT 0xd |
150 | #define EVENT_TYPE_RMP_HW_ERR 0xe |
151 | #define EVENT_DEVID_MASK 0xffff |
152 | #define EVENT_DEVID_SHIFT 0 |
153 | #define EVENT_DOMID_MASK_LO 0xffff |
154 | #define EVENT_DOMID_MASK_HI 0xf0000 |
155 | #define EVENT_FLAGS_MASK 0xfff |
156 | #define EVENT_FLAGS_SHIFT 0x10 |
157 | #define EVENT_FLAG_RW 0x020 |
158 | #define EVENT_FLAG_I 0x008 |
159 | |
160 | /* feature control bits */ |
161 | #define CONTROL_IOMMU_EN 0 |
162 | #define CONTROL_HT_TUN_EN 1 |
163 | #define CONTROL_EVT_LOG_EN 2 |
164 | #define CONTROL_EVT_INT_EN 3 |
165 | #define CONTROL_COMWAIT_EN 4 |
166 | #define CONTROL_INV_TIMEOUT 5 |
167 | #define CONTROL_PASSPW_EN 8 |
168 | #define CONTROL_RESPASSPW_EN 9 |
169 | #define CONTROL_COHERENT_EN 10 |
170 | #define CONTROL_ISOC_EN 11 |
171 | #define CONTROL_CMDBUF_EN 12 |
172 | #define CONTROL_PPRLOG_EN 13 |
173 | #define CONTROL_PPRINT_EN 14 |
174 | #define CONTROL_PPR_EN 15 |
175 | #define CONTROL_GT_EN 16 |
176 | #define CONTROL_GA_EN 17 |
177 | #define CONTROL_GAM_EN 25 |
178 | #define CONTROL_GALOG_EN 28 |
179 | #define CONTROL_GAINT_EN 29 |
180 | #define CONTROL_XT_EN 50 |
181 | #define CONTROL_INTCAPXT_EN 51 |
182 | #define CONTROL_IRTCACHEDIS 59 |
183 | #define CONTROL_SNPAVIC_EN 61 |
184 | |
185 | #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) |
186 | #define CTRL_INV_TO_NONE 0 |
187 | #define CTRL_INV_TO_1MS 1 |
188 | #define CTRL_INV_TO_10MS 2 |
189 | #define CTRL_INV_TO_100MS 3 |
190 | #define CTRL_INV_TO_1S 4 |
191 | #define CTRL_INV_TO_10S 5 |
192 | #define CTRL_INV_TO_100S 6 |
193 | |
194 | /* command specific defines */ |
195 | #define CMD_COMPL_WAIT 0x01 |
196 | #define CMD_INV_DEV_ENTRY 0x02 |
197 | #define CMD_INV_IOMMU_PAGES 0x03 |
198 | #define CMD_INV_IOTLB_PAGES 0x04 |
199 | #define CMD_INV_IRT 0x05 |
200 | #define CMD_COMPLETE_PPR 0x07 |
201 | #define CMD_INV_ALL 0x08 |
202 | |
203 | #define CMD_COMPL_WAIT_STORE_MASK 0x01 |
204 | #define CMD_COMPL_WAIT_INT_MASK 0x02 |
205 | #define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 |
206 | #define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 |
207 | #define CMD_INV_IOMMU_PAGES_GN_MASK 0x04 |
208 | |
209 | #define PPR_STATUS_MASK 0xf |
210 | #define PPR_STATUS_SHIFT 12 |
211 | |
212 | #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL |
213 | |
214 | /* macros and definitions for device table entries */ |
215 | #define DEV_ENTRY_VALID 0x00 |
216 | #define DEV_ENTRY_TRANSLATION 0x01 |
217 | #define DEV_ENTRY_HAD 0x07 |
218 | #define DEV_ENTRY_PPR 0x34 |
219 | #define DEV_ENTRY_IR 0x3d |
220 | #define DEV_ENTRY_IW 0x3e |
221 | #define DEV_ENTRY_NO_PAGE_FAULT 0x62 |
222 | #define DEV_ENTRY_EX 0x67 |
223 | #define DEV_ENTRY_SYSMGT1 0x68 |
224 | #define DEV_ENTRY_SYSMGT2 0x69 |
225 | #define DEV_ENTRY_IRQ_TBL_EN 0x80 |
226 | #define DEV_ENTRY_INIT_PASS 0xb8 |
227 | #define DEV_ENTRY_EINT_PASS 0xb9 |
228 | #define DEV_ENTRY_NMI_PASS 0xba |
229 | #define DEV_ENTRY_LINT0_PASS 0xbe |
230 | #define DEV_ENTRY_LINT1_PASS 0xbf |
231 | #define DEV_ENTRY_MODE_MASK 0x07 |
232 | #define DEV_ENTRY_MODE_SHIFT 0x09 |
233 | |
234 | #define MAX_DEV_TABLE_ENTRIES 0xffff |
235 | |
236 | /* constants to configure the command buffer */ |
237 | #define CMD_BUFFER_SIZE 8192 |
238 | #define CMD_BUFFER_UNINITIALIZED 1 |
239 | #define CMD_BUFFER_ENTRIES 512 |
240 | #define MMIO_CMD_SIZE_SHIFT 56 |
241 | #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) |
242 | |
243 | /* constants for event buffer handling */ |
244 | #define EVT_BUFFER_SIZE 8192 /* 512 entries */ |
245 | #define EVT_LEN_MASK (0x9ULL << 56) |
246 | |
247 | /* Constants for PPR Log handling */ |
248 | #define PPR_LOG_ENTRIES 512 |
249 | #define PPR_LOG_SIZE_SHIFT 56 |
250 | #define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT) |
251 | #define PPR_ENTRY_SIZE 16 |
252 | #define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES) |
253 | |
254 | #define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL) |
255 | #define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL) |
256 | #define PPR_DEVID(x) ((x) & 0xffffULL) |
257 | #define PPR_TAG(x) (((x) >> 32) & 0x3ffULL) |
258 | #define PPR_PASID1(x) (((x) >> 16) & 0xffffULL) |
259 | #define PPR_PASID2(x) (((x) >> 42) & 0xfULL) |
260 | #define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x)) |
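
/*
 * Illustrative sketch, not part of the driver: pulling the fields out of the
 * first 64-bit word of a raw PPR log entry. Note how the 20-bit PASID is
 * reassembled from two separate bit ranges by PPR_PASID(). The helper name
 * is hypothetical.
 */
static inline void example_decode_ppr_entry(u64 raw0, u16 *devid,
					    u32 *pasid, u16 *tag)
{
	*devid = PPR_DEVID(raw0);	/* requesting device id         */
	*pasid = PPR_PASID(raw0);	/* 20-bit PASID (PASID2:PASID1) */
	*tag   = PPR_TAG(raw0);		/* completion tag for the reply */
}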
261 | |
262 | #define PPR_REQ_FAULT 0x01 |
263 | |
264 | /* Constants for GA Log handling */ |
265 | #define GA_LOG_ENTRIES 512 |
266 | #define GA_LOG_SIZE_SHIFT 56 |
267 | #define GA_LOG_SIZE_512 (0x8ULL << GA_LOG_SIZE_SHIFT) |
268 | #define GA_ENTRY_SIZE 8 |
269 | #define GA_LOG_SIZE (GA_ENTRY_SIZE * GA_LOG_ENTRIES) |
270 | |
#define GA_TAG(x)			((u32)((x) & 0xffffffffULL))
#define GA_DEVID(x)			((u16)(((x) >> 32) & 0xffffULL))
273 | #define GA_REQ_TYPE(x) (((x) >> 60) & 0xfULL) |
274 | |
275 | #define GA_GUEST_NR 0x1 |
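
/*
 * Illustrative sketch, not part of the driver: classifying one 64-bit GA log
 * entry with the accessors above and extracting the fields a notifier would
 * need. The helper name is hypothetical.
 */
static inline bool example_is_guest_request(u64 entry, u16 *devid, u32 *gatag)
{
	if (GA_REQ_TYPE(entry) != GA_GUEST_NR)
		return false;

	*devid = GA_DEVID(entry);	/* device that raised the request  */
	*gatag = GA_TAG(entry);		/* tag identifying the target vCPU */
	return true;
}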
276 | |
277 | #define IOMMU_IN_ADDR_BIT_SIZE 52 |
278 | #define IOMMU_OUT_ADDR_BIT_SIZE 52 |
279 | |
280 | /* |
 * This bitmap is used to advertise the page sizes our hardware supports
282 | * to the IOMMU core, which will then use this information to split |
283 | * physically contiguous memory regions it is mapping into page sizes |
284 | * that we support. |
285 | * |
 * 512 GB pages are not supported due to a hardware bug.
287 | */ |
288 | #define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38)) |
289 | /* 4K, 2MB, 1G page sizes are supported */ |
290 | #define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30)) |
291 | |
/* Bit value definitions for DTE IRQ remapping fields */
293 | #define DTE_IRQ_PHYS_ADDR_MASK GENMASK_ULL(51, 6) |
294 | #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) |
295 | #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) |
296 | #define DTE_IRQ_REMAP_ENABLE 1ULL |
297 | |
298 | /* |
 * AMD IOMMU hardware only supports 512 IRTEs, despite
 * the architectural limit of 2048 entries.
301 | */ |
302 | #define DTE_INTTAB_ALIGNMENT 128 |
303 | #define DTE_INTTABLEN_VALUE 9ULL |
304 | #define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1) |
305 | #define DTE_INTTABLEN_MASK (0xfULL << 1) |
306 | #define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE) |
307 | |
308 | #define PAGE_MODE_NONE 0x00 |
309 | #define PAGE_MODE_1_LEVEL 0x01 |
310 | #define PAGE_MODE_2_LEVEL 0x02 |
311 | #define PAGE_MODE_3_LEVEL 0x03 |
312 | #define PAGE_MODE_4_LEVEL 0x04 |
313 | #define PAGE_MODE_5_LEVEL 0x05 |
314 | #define PAGE_MODE_6_LEVEL 0x06 |
315 | #define PAGE_MODE_7_LEVEL 0x07 |
316 | |
317 | #define GUEST_PGTABLE_4_LEVEL 0x00 |
318 | #define GUEST_PGTABLE_5_LEVEL 0x01 |
319 | |
320 | #define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) |
321 | #define PM_LEVEL_SIZE(x) (((x) < 6) ? \ |
322 | ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ |
323 | (0xffffffffffffffffULL)) |
324 | #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) |
325 | #define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) |
326 | #define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ |
327 | IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW) |
328 | #define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) |
329 | |
330 | #define PM_MAP_4k 0 |
331 | #define PM_ADDR_MASK 0x000ffffffffff000ULL |
332 | #define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ |
333 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) |
334 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) |
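
/*
 * Illustrative sketch, not part of the driver: using PM_LEVEL_INDEX() during
 * a page-table walk. Each level selects 9 address bits starting at bit 12,
 * so e.g. PM_LEVEL_INDEX(2, iova) selects bits 38:30. 'pt' and the helper
 * name are hypothetical.
 */
static inline u64 *example_pte_for_level(u64 *pt, unsigned long iova, int level)
{
	return &pt[PM_LEVEL_INDEX(level, iova)];
}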
335 | |
336 | /* |
337 | * Returns the page table level to use for a given page size |
338 | * Pagesize is expected to be a power-of-two |
339 | */ |
340 | #define PAGE_SIZE_LEVEL(pagesize) \ |
341 | ((__ffs(pagesize) - 12) / 9) |
342 | /* |
343 | * Returns the number of ptes to use for a given page size |
344 | * Pagesize is expected to be a power-of-two |
345 | */ |
346 | #define PAGE_SIZE_PTE_COUNT(pagesize) \ |
347 | (1ULL << ((__ffs(pagesize) - 12) % 9)) |
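
/*
 * Worked example: for a 2 MiB page (pagesize == 1UL << 21), __ffs() is 21,
 * so PAGE_SIZE_LEVEL() yields (21 - 12) / 9 == 1 (a level-1 large PTE) and
 * PAGE_SIZE_PTE_COUNT() yields 1 << ((21 - 12) % 9) == 1 PTE. For a 32 KiB
 * page (1UL << 15) the level is 0 and 1 << 3 == 8 consecutive level-0 PTEs
 * are needed to cover it.
 */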
348 | |
349 | /* |
350 | * Aligns a given io-virtual address to a given page size |
351 | * Pagesize is expected to be a power-of-two |
352 | */ |
353 | #define PAGE_SIZE_ALIGN(address, pagesize) \ |
354 | ((address) & ~((pagesize) - 1)) |
355 | /* |
356 | * Creates an IOMMU PTE for an address and a given pagesize |
357 | * The PTE has no permission bits set |
358 | * Pagesize is expected to be a power-of-two larger than 4096 |
359 | */ |
360 | #define PAGE_SIZE_PTE(address, pagesize) \ |
361 | (((address) | ((pagesize) - 1)) & \ |
	 (~((pagesize) >> 1)) & PM_ADDR_MASK)
363 | |
364 | /* |
365 | * Takes a PTE value with mode=0x07 and returns the page size it maps |
366 | */ |
367 | #define PTE_PAGE_SIZE(pte) \ |
368 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) |
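
/*
 * Worked example: PAGE_SIZE_PTE(0x40000000ULL, 1ULL << 21) first sets the low
 * bits (0x401fffff), then clears bit 20 (pagesize >> 1) and the page-offset
 * bits, giving an address field of 0x400ff000. PTE_PAGE_SIZE() reverses this:
 * ffz(0x400ff000 | 0xfff) finds the first zero bit at position 20, and
 * 1ULL << (1 + 20) recovers the 2 MiB page size.
 */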
369 | |
370 | /* |
371 | * Takes a page-table level and returns the default page-size for this level |
372 | */ |
373 | #define PTE_LEVEL_PAGE_SIZE(level) \ |
374 | (1ULL << (12 + (9 * (level)))) |
375 | |
376 | /* |
377 | * The IOPTE dirty bit |
378 | */ |
379 | #define IOMMU_PTE_HD_BIT (6) |
380 | |
381 | /* |
382 | * Bit value definition for I/O PTE fields |
383 | */ |
384 | #define IOMMU_PTE_PR BIT_ULL(0) |
385 | #define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT) |
386 | #define IOMMU_PTE_U BIT_ULL(59) |
387 | #define IOMMU_PTE_FC BIT_ULL(60) |
388 | #define IOMMU_PTE_IR BIT_ULL(61) |
389 | #define IOMMU_PTE_IW BIT_ULL(62) |
390 | |
391 | /* |
392 | * Bit value definition for DTE fields |
393 | */ |
394 | #define DTE_FLAG_V BIT_ULL(0) |
395 | #define DTE_FLAG_TV BIT_ULL(1) |
396 | #define DTE_FLAG_HAD (3ULL << 7) |
397 | #define DTE_FLAG_GIOV BIT_ULL(54) |
398 | #define DTE_FLAG_GV BIT_ULL(55) |
399 | #define DTE_GLX_SHIFT (56) |
400 | #define DTE_GLX_MASK (3) |
401 | #define DTE_FLAG_IR BIT_ULL(61) |
402 | #define DTE_FLAG_IW BIT_ULL(62) |
403 | |
404 | #define DTE_FLAG_IOTLB BIT_ULL(32) |
405 | #define DTE_FLAG_MASK (0x3ffULL << 32) |
406 | #define DEV_DOMID_MASK 0xffffULL |
407 | |
408 | #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) |
409 | #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) |
410 | #define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) |
411 | |
412 | #define DTE_GCR3_INDEX_A 0 |
413 | #define DTE_GCR3_INDEX_B 1 |
414 | #define DTE_GCR3_INDEX_C 1 |
415 | |
416 | #define DTE_GCR3_SHIFT_A 58 |
417 | #define DTE_GCR3_SHIFT_B 16 |
418 | #define DTE_GCR3_SHIFT_C 43 |
419 | |
420 | #define DTE_GPT_LEVEL_SHIFT 54 |
421 | |
422 | #define GCR3_VALID 0x01ULL |
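
/*
 * Illustrative sketch, not part of the driver: how a guest CR3 table pointer
 * is scattered across the 64-bit words of a device table entry using the
 * VAL/SHIFT/INDEX definitions above. 'dte' points at the four u64 words of a
 * DTE; the helper name is hypothetical.
 */
static inline void example_set_gcr3(u64 *dte, u64 gcr3)
{
	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
}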
423 | |
424 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
425 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) |
426 | #define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD) |
427 | #define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
428 | #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) |
429 | |
430 | #define IOMMU_PROT_MASK 0x03 |
431 | #define IOMMU_PROT_IR 0x01 |
432 | #define IOMMU_PROT_IW 0x02 |
433 | |
434 | #define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2) |
435 | |
436 | /* IOMMU capabilities */ |
437 | #define IOMMU_CAP_IOTLB 24 |
438 | #define IOMMU_CAP_NPCACHE 26 |
439 | #define IOMMU_CAP_EFR 27 |
440 | |
441 | /* IOMMU IVINFO */ |
442 | #define IOMMU_IVINFO_OFFSET 36 |
443 | #define IOMMU_IVINFO_EFRSUP BIT(0) |
444 | #define IOMMU_IVINFO_DMA_REMAP BIT(1) |
445 | |
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
447 | #define IOMMU_FEAT_GASUP_SHIFT 6 |
448 | |
449 | /* IOMMU Extended Feature Register (EFR) */ |
450 | #define IOMMU_EFR_XTSUP_SHIFT 2 |
451 | #define IOMMU_EFR_GASUP_SHIFT 7 |
452 | #define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46 |
453 | |
454 | #define MAX_DOMAIN_ID 65536 |
455 | |
456 | /* Timeout stuff */ |
457 | #define LOOP_TIMEOUT 100000 |
458 | #define MMIO_STATUS_TIMEOUT 2000000 |
459 | |
460 | extern bool amd_iommu_dump; |
461 | #define DUMP_printk(format, arg...) \ |
462 | do { \ |
463 | if (amd_iommu_dump) \ |
464 | pr_info("AMD-Vi: " format, ## arg); \ |
	} while (0)
466 | |
467 | /* global flag if IOMMUs cache non-present entries */ |
468 | extern bool amd_iommu_np_cache; |
469 | /* Only true if all IOMMUs support device IOTLBs */ |
470 | extern bool amd_iommu_iotlb_sup; |
471 | |
472 | struct irq_remap_table { |
473 | raw_spinlock_t lock; |
474 | unsigned min_index; |
475 | u32 *table; |
476 | }; |
477 | |
478 | /* Interrupt remapping feature used? */ |
479 | extern bool amd_iommu_irq_remap; |
480 | |
481 | extern const struct iommu_ops amd_iommu_ops; |
482 | |
483 | /* IVRS indicates that pre-boot remapping was enabled */ |
484 | extern bool amdr_ivrs_remap_support; |
485 | |
/* kmem_cache to get tables with 128-byte alignment */
487 | extern struct kmem_cache *amd_iommu_irq_cache; |
488 | |
489 | #define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff) |
490 | #define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff) |
491 | #define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \ |
492 | ((devid) & 0xffff)) |
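
/*
 * Example: PCI_SEG_DEVID_TO_SBDF(0x0001, 0x00a5) == 0x000100a5, and
 * PCI_SBDF_TO_SEGID()/PCI_SBDF_TO_DEVID() recover 0x0001 and 0x00a5 from it.
 */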
493 | |
/* Make iterating over all PCI segments easier */
495 | #define for_each_pci_segment(pci_seg) \ |
496 | list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list) |
497 | #define for_each_pci_segment_safe(pci_seg, next) \ |
498 | list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list) |
499 | /* |
500 | * Make iterating over all IOMMUs easier |
501 | */ |
502 | #define for_each_iommu(iommu) \ |
503 | list_for_each_entry((iommu), &amd_iommu_list, list) |
504 | #define for_each_iommu_safe(iommu, next) \ |
505 | list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) |
506 | |
507 | struct amd_iommu; |
508 | struct iommu_domain; |
509 | struct irq_domain; |
510 | struct amd_irte_ops; |
511 | |
512 | #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0) |
513 | |
514 | #define io_pgtable_to_data(x) \ |
515 | container_of((x), struct amd_io_pgtable, iop) |
516 | |
517 | #define io_pgtable_ops_to_data(x) \ |
518 | io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) |
519 | |
520 | #define io_pgtable_ops_to_domain(x) \ |
521 | container_of(io_pgtable_ops_to_data(x), \ |
522 | struct protection_domain, iop) |
523 | |
524 | #define io_pgtable_cfg_to_data(x) \ |
525 | container_of((x), struct amd_io_pgtable, pgtbl_cfg) |
526 | |
527 | struct gcr3_tbl_info { |
528 | u64 *gcr3_tbl; /* Guest CR3 table */ |
529 | int glx; /* Number of levels for GCR3 table */ |
530 | u32 pasid_cnt; /* Track attached PASIDs */ |
531 | u16 domid; /* Per device domain ID */ |
532 | }; |
533 | |
534 | struct amd_io_pgtable { |
535 | struct io_pgtable_cfg pgtbl_cfg; |
536 | struct io_pgtable iop; |
537 | int mode; |
538 | u64 *root; |
539 | u64 *pgd; /* v2 pgtable pgd pointer */ |
540 | }; |
541 | |
542 | enum protection_domain_mode { |
543 | PD_MODE_V1 = 1, |
544 | PD_MODE_V2, |
545 | }; |
546 | |
547 | /* |
548 | * This structure contains generic data for IOMMU protection domains |
549 | * independent of their use. |
550 | */ |
551 | struct protection_domain { |
552 | struct list_head dev_list; /* List of all devices in this domain */ |
553 | struct iommu_domain domain; /* generic domain handle used by |
554 | iommu core code */ |
555 | struct amd_io_pgtable iop; |
	spinlock_t lock;	/* mostly used to lock the page table */
557 | u16 id; /* the domain id written to the device table */ |
558 | int nid; /* Node ID */ |
559 | enum protection_domain_mode pd_mode; /* Track page table type */ |
560 | bool dirty_tracking; /* dirty tracking is enabled in the domain */ |
561 | unsigned dev_cnt; /* devices assigned to this domain */ |
562 | unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ |
563 | }; |
564 | |
565 | /* |
566 | * This structure contains information about one PCI segment in the system. |
567 | */ |
568 | struct amd_iommu_pci_seg { |
569 | /* List with all PCI segments in the system */ |
570 | struct list_head list; |
571 | |
572 | /* List of all available dev_data structures */ |
573 | struct llist_head dev_data_list; |
574 | |
575 | /* PCI segment number */ |
576 | u16 id; |
577 | |
578 | /* Largest PCI device id we expect translation requests for */ |
579 | u16 last_bdf; |
580 | |
581 | /* Size of the device table */ |
582 | u32 dev_table_size; |
583 | |
584 | /* Size of the alias table */ |
585 | u32 alias_table_size; |
586 | |
587 | /* Size of the rlookup table */ |
588 | u32 rlookup_table_size; |
589 | |
590 | /* |
591 | * device table virtual address |
592 | * |
593 | * Pointer to the per PCI segment device table. |
594 | * It is indexed by the PCI device id or the HT unit id and contains |
595 | * information about the domain the device belongs to as well as the |
596 | * page table root pointer. |
597 | */ |
598 | struct dev_table_entry *dev_table; |
599 | |
600 | /* |
601 | * The rlookup iommu table is used to find the IOMMU which is |
602 | * responsible for a specific device. It is indexed by the PCI |
603 | * device id. |
604 | */ |
605 | struct amd_iommu **rlookup_table; |
606 | |
607 | /* |
608 | * This table is used to find the irq remapping table for a given |
609 | * device id quickly. |
610 | */ |
611 | struct irq_remap_table **irq_lookup_table; |
612 | |
613 | /* |
	 * Pointer to a device table to which the contents of the old device
	 * table will be copied. It is only used by the kdump kernel.
616 | */ |
617 | struct dev_table_entry *old_dev_tbl_cpy; |
618 | |
619 | /* |
	 * The alias table is a driver-specific data structure which contains the
	 * mappings of the PCI device IDs to the actual requestor IDs on the IOMMU.
	 * More than one device can share the same requestor ID.
623 | */ |
624 | u16 *alias_table; |
625 | |
626 | /* |
627 | * A list of required unity mappings we find in ACPI. It is not locked |
	 * because at runtime it is only read. It is created at ACPI table
629 | * parsing time. |
630 | */ |
631 | struct list_head unity_map; |
632 | }; |
633 | |
634 | /* |
635 | * Structure where we save information about one hardware AMD IOMMU in the |
636 | * system. |
637 | */ |
638 | struct amd_iommu { |
639 | struct list_head list; |
640 | |
641 | /* Index within the IOMMU array */ |
642 | int index; |
643 | |
644 | /* locks the accesses to the hardware */ |
645 | raw_spinlock_t lock; |
646 | |
647 | /* Pointer to PCI device of this IOMMU */ |
648 | struct pci_dev *dev; |
649 | |
650 | /* Cache pdev to root device for resume quirks */ |
651 | struct pci_dev *root_pdev; |
652 | |
653 | /* physical address of MMIO space */ |
654 | u64 mmio_phys; |
655 | |
656 | /* physical end address of MMIO space */ |
657 | u64 mmio_phys_end; |
658 | |
659 | /* virtual address of MMIO space */ |
660 | u8 __iomem *mmio_base; |
661 | |
662 | /* capabilities of that IOMMU read from ACPI */ |
663 | u32 cap; |
664 | |
665 | /* flags read from acpi table */ |
666 | u8 acpi_flags; |
667 | |
668 | /* Extended features */ |
669 | u64 features; |
670 | |
671 | /* Extended features 2 */ |
672 | u64 features2; |
673 | |
674 | /* PCI device id of the IOMMU device */ |
675 | u16 devid; |
676 | |
677 | /* |
678 | * Capability pointer. There could be more than one IOMMU per PCI |
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
681 | */ |
682 | u16 cap_ptr; |
683 | |
	/* PCI segment this IOMMU belongs to */
685 | struct amd_iommu_pci_seg *pci_seg; |
686 | |
687 | /* start of exclusion range of that IOMMU */ |
688 | u64 exclusion_start; |
689 | /* length of exclusion range of that IOMMU */ |
690 | u64 exclusion_length; |
691 | |
692 | /* command buffer virtual address */ |
693 | u8 *cmd_buf; |
694 | u32 cmd_buf_head; |
695 | u32 cmd_buf_tail; |
696 | |
697 | /* event buffer virtual address */ |
698 | u8 *evt_buf; |
699 | |
700 | /* Name for event log interrupt */ |
701 | unsigned char evt_irq_name[16]; |
702 | |
703 | /* Base of the PPR log, if present */ |
704 | u8 *ppr_log; |
705 | |
706 | /* Name for PPR log interrupt */ |
707 | unsigned char ppr_irq_name[16]; |
708 | |
709 | /* Base of the GA log, if present */ |
710 | u8 *ga_log; |
711 | |
712 | /* Name for GA log interrupt */ |
713 | unsigned char ga_irq_name[16]; |
714 | |
715 | /* Tail of the GA log, if present */ |
716 | u8 *ga_log_tail; |
717 | |
718 | /* true if interrupts for this IOMMU are already enabled */ |
719 | bool int_enabled; |
720 | |
	/* if true, we need to send a completion wait command */
722 | bool need_sync; |
723 | |
	/* true if IRTE caching is disabled */
725 | bool irtcachedis_enabled; |
726 | |
727 | /* Handle for IOMMU core code */ |
728 | struct iommu_device iommu; |
729 | |
730 | /* |
731 | * We can't rely on the BIOS to restore all values on reinit, so we |
732 | * need to stash them |
733 | */ |
734 | |
735 | /* The iommu BAR */ |
736 | u32 stored_addr_lo; |
737 | u32 stored_addr_hi; |
738 | |
739 | /* |
740 | * Each iommu has 6 l1s, each of which is documented as having 0x12 |
741 | * registers |
742 | */ |
743 | u32 stored_l1[6][0x12]; |
744 | |
745 | /* The l2 indirect registers */ |
746 | u32 stored_l2[0x83]; |
747 | |
748 | /* The maximum PC banks and counters/bank (PCSup=1) */ |
749 | u8 max_banks; |
750 | u8 max_counters; |
751 | #ifdef CONFIG_IRQ_REMAP |
752 | struct irq_domain *ir_domain; |
753 | |
754 | struct amd_irte_ops *irte_ops; |
755 | #endif |
756 | |
757 | u32 flags; |
758 | volatile u64 *cmd_sem; |
759 | atomic64_t cmd_sem_val; |
760 | |
761 | #ifdef CONFIG_AMD_IOMMU_DEBUGFS |
762 | /* DebugFS Info */ |
763 | struct dentry *debugfs; |
764 | #endif |
765 | }; |
766 | |
767 | static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) |
768 | { |
769 | struct iommu_device *iommu = dev_to_iommu_device(dev); |
770 | |
771 | return container_of(iommu, struct amd_iommu, iommu); |
772 | } |
773 | |
774 | #define ACPIHID_UID_LEN 256 |
775 | #define ACPIHID_HID_LEN 9 |
776 | |
777 | struct acpihid_map_entry { |
778 | struct list_head list; |
779 | u8 uid[ACPIHID_UID_LEN]; |
780 | u8 hid[ACPIHID_HID_LEN]; |
781 | u32 devid; |
782 | u32 root_devid; |
783 | bool cmd_line; |
784 | struct iommu_group *group; |
785 | }; |
786 | |
787 | struct devid_map { |
788 | struct list_head list; |
789 | u8 id; |
790 | u32 devid; |
791 | bool cmd_line; |
792 | }; |
793 | |
794 | #define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ |
795 | #define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ |
796 | #define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ |
797 | /* Device may request execution on memory pages */ |
798 | #define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 |
799 | /* Device may request super-user privileges */ |
800 | #define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 |
801 | |
802 | /* |
 * This struct contains device-specific data for the IOMMU
804 | */ |
805 | struct iommu_dev_data { |
	/* Protect against attach/detach races */
807 | spinlock_t lock; |
808 | |
809 | struct list_head list; /* For domain->dev_list */ |
810 | struct llist_node dev_data_list; /* For global dev_data_list */ |
811 | struct protection_domain *domain; /* Domain the device is bound to */ |
812 | struct gcr3_tbl_info gcr3_info; /* Per-device GCR3 table */ |
813 | struct device *dev; |
814 | u16 devid; /* PCI Device ID */ |
815 | |
816 | u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */ |
817 | int ats_qdep; |
818 | u8 ats_enabled :1; /* ATS state */ |
819 | u8 pri_enabled :1; /* PRI state */ |
820 | u8 pasid_enabled:1; /* PASID state */ |
821 | u8 pri_tlp :1; /* PASID TLB required for |
822 | PPR completions */ |
823 | u8 ppr :1; /* Enable device PPR support */ |
824 | bool use_vapic; /* Enable device to use vapic mode */ |
825 | bool defer_attach; |
826 | |
827 | struct ratelimit_state rs; /* Ratelimit IOPF messages */ |
828 | }; |
829 | |
830 | /* Map HPET and IOAPIC ids to the devid used by the IOMMU */ |
831 | extern struct list_head ioapic_map; |
832 | extern struct list_head hpet_map; |
833 | extern struct list_head acpihid_map; |
834 | |
835 | /* |
836 | * List with all PCI segments in the system. This list is not locked because |
837 | * it is only written at driver initialization time |
838 | */ |
839 | extern struct list_head amd_iommu_pci_seg_list; |
840 | |
841 | /* |
842 | * List with all IOMMUs in the system. This list is not locked because it is |
843 | * only written and read at driver initialization or suspend time |
844 | */ |
845 | extern struct list_head amd_iommu_list; |
846 | |
847 | /* |
848 | * Array with pointers to each IOMMU struct |
849 | * The indices are referenced in the protection domains |
850 | */ |
851 | extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; |
852 | |
853 | /* |
854 | * Structure defining one entry in the device table |
855 | */ |
856 | struct dev_table_entry { |
857 | u64 data[4]; |
858 | }; |
859 | |
860 | /* |
861 | * One entry for unity mappings parsed out of the ACPI table. |
862 | */ |
863 | struct unity_map_entry { |
864 | struct list_head list; |
865 | |
	/* first device id this entry is used for (inclusive) */
	u16 devid_start;
	/* last device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;
874 | u64 address_end; |
875 | |
876 | /* required protection */ |
877 | int prot; |
878 | }; |
879 | |
880 | /* |
881 | * Data structures for device handling |
882 | */ |
883 | |
884 | /* size of the dma_ops aperture as power of 2 */ |
885 | extern unsigned amd_iommu_aperture_order; |
886 | |
887 | /* allocation bitmap for domain ids */ |
888 | extern unsigned long *amd_iommu_pd_alloc_bitmap; |
889 | |
890 | extern bool amd_iommu_force_isolation; |
891 | |
892 | /* Max levels of glxval supported */ |
893 | extern int amd_iommu_max_glx_val; |
894 | |
895 | /* Global EFR and EFR2 registers */ |
896 | extern u64 amd_iommu_efr; |
897 | extern u64 amd_iommu_efr2; |
898 | |
899 | static inline int get_ioapic_devid(int id) |
900 | { |
901 | struct devid_map *entry; |
902 | |
903 | list_for_each_entry(entry, &ioapic_map, list) { |
904 | if (entry->id == id) |
905 | return entry->devid; |
906 | } |
907 | |
908 | return -EINVAL; |
909 | } |
910 | |
911 | static inline int get_hpet_devid(int id) |
912 | { |
913 | struct devid_map *entry; |
914 | |
915 | list_for_each_entry(entry, &hpet_map, list) { |
916 | if (entry->id == id) |
917 | return entry->devid; |
918 | } |
919 | |
920 | return -EINVAL; |
921 | } |
922 | |
923 | enum amd_iommu_intr_mode_type { |
924 | AMD_IOMMU_GUEST_IR_LEGACY, |
925 | |
926 | /* This mode is not visible to users. It is used when |
	 * we cannot fully enable vAPIC and fall back to supporting only
	 * legacy interrupt remapping via 128-bit IRTEs.
929 | */ |
930 | AMD_IOMMU_GUEST_IR_LEGACY_GA, |
931 | AMD_IOMMU_GUEST_IR_VAPIC, |
932 | }; |
933 | |
#define AMD_IOMMU_GUEST_IR_GA(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC || \
					 (x) == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC)
938 | |
939 | union irte { |
940 | u32 val; |
941 | struct { |
942 | u32 valid : 1, |
943 | no_fault : 1, |
944 | int_type : 3, |
945 | rq_eoi : 1, |
946 | dm : 1, |
947 | rsvd_1 : 1, |
948 | destination : 8, |
949 | vector : 8, |
950 | rsvd_2 : 8; |
951 | } fields; |
952 | }; |
953 | |
#define APICID_TO_IRTE_DEST_LO(x)    ((x) & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)    (((x) >> 24) & 0xff)
956 | |
957 | union irte_ga_lo { |
958 | u64 val; |
959 | |
960 | /* For int remapping */ |
961 | struct { |
962 | u64 valid : 1, |
963 | no_fault : 1, |
964 | /* ------ */ |
965 | int_type : 3, |
966 | rq_eoi : 1, |
967 | dm : 1, |
968 | /* ------ */ |
969 | guest_mode : 1, |
970 | destination : 24, |
971 | ga_tag : 32; |
972 | } fields_remap; |
973 | |
974 | /* For guest vAPIC */ |
975 | struct { |
976 | u64 valid : 1, |
977 | no_fault : 1, |
978 | /* ------ */ |
979 | ga_log_intr : 1, |
980 | rsvd1 : 3, |
981 | is_run : 1, |
982 | /* ------ */ |
983 | guest_mode : 1, |
984 | destination : 24, |
985 | ga_tag : 32; |
986 | } fields_vapic; |
987 | }; |
988 | |
989 | union irte_ga_hi { |
990 | u64 val; |
991 | struct { |
992 | u64 vector : 8, |
993 | rsvd_1 : 4, |
994 | ga_root_ptr : 40, |
995 | rsvd_2 : 4, |
996 | destination : 8; |
997 | } fields; |
998 | }; |
999 | |
1000 | struct irte_ga { |
1001 | union { |
1002 | struct { |
1003 | union irte_ga_lo lo; |
1004 | union irte_ga_hi hi; |
1005 | }; |
1006 | u128 irte; |
1007 | }; |
1008 | }; |
1009 | |
1010 | struct irq_2_irte { |
1011 | u16 devid; /* Device ID for IRTE table */ |
	u16 index;			/* Index into IRTE table */
1013 | }; |
1014 | |
1015 | struct amd_ir_data { |
1016 | u32 cached_ga_tag; |
1017 | struct amd_iommu *iommu; |
1018 | struct irq_2_irte irq_2_irte; |
1019 | struct msi_msg msi_entry; |
1020 | void *entry; /* Pointer to union irte or struct irte_ga */ |
1021 | |
1022 | /** |
1023 | * Store information for activate/de-activate |
1024 | * Guest virtual APIC mode during runtime. |
1025 | */ |
1026 | struct irq_cfg *cfg; |
1027 | int ga_vector; |
1028 | u64 ga_root_ptr; |
1029 | u32 ga_tag; |
1030 | }; |
1031 | |
1032 | struct amd_irte_ops { |
1033 | void (*prepare)(void *, u32, bool, u8, u32, int); |
1034 | void (*activate)(struct amd_iommu *iommu, void *, u16, u16); |
1035 | void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16); |
1036 | void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32); |
1037 | void *(*get)(struct irq_remap_table *, int); |
1038 | void (*set_allocated)(struct irq_remap_table *, int); |
1039 | bool (*is_allocated)(struct irq_remap_table *, int); |
1040 | void (*clear_allocated)(struct irq_remap_table *, int); |
1041 | }; |
1042 | |
1043 | #ifdef CONFIG_IRQ_REMAP |
1044 | extern struct amd_irte_ops irte_32_ops; |
1045 | extern struct amd_irte_ops irte_128_ops; |
1046 | #endif |
1047 | |
1048 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ |
1049 | |