1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright © 2006-2015, Intel Corporation. |
4 | * |
5 | * Authors: Ashok Raj <ashok.raj@intel.com> |
6 | * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
7 | * David Woodhouse <David.Woodhouse@intel.com> |
8 | */ |
9 | |
10 | #ifndef _INTEL_IOMMU_H_ |
11 | #define _INTEL_IOMMU_H_ |
12 | |
13 | #include <linux/types.h> |
14 | #include <linux/iova.h> |
15 | #include <linux/io.h> |
16 | #include <linux/idr.h> |
17 | #include <linux/mmu_notifier.h> |
18 | #include <linux/list.h> |
19 | #include <linux/iommu.h> |
20 | #include <linux/io-64-nonatomic-lo-hi.h> |
21 | #include <linux/dmar.h> |
22 | #include <linux/bitfield.h> |
23 | #include <linux/xarray.h> |
24 | #include <linux/perf_event.h> |
25 | |
26 | #include <asm/cacheflush.h> |
27 | #include <asm/iommu.h> |
28 | #include <uapi/linux/iommufd.h> |
29 | |
30 | /* |
31 | * VT-d hardware uses 4KiB page size regardless of host page size. |
32 | */ |
33 | #define VTD_PAGE_SHIFT (12) |
34 | #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) |
35 | #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) |
36 | #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) |
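
/*
 * Worked example: with VTD_PAGE_SIZE == 4096, VTD_PAGE_ALIGN(0x1001)
 * evaluates to (0x1001 + 0xfff) & ~0xfffULL == 0x2000, i.e. any address
 * is rounded up to the next 4KiB boundary independent of PAGE_SIZE.
 */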
37 | |
38 | #define VTD_STRIDE_SHIFT (9) |
39 | #define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) |
40 | |
41 | #define DMA_PTE_READ BIT_ULL(0) |
42 | #define DMA_PTE_WRITE BIT_ULL(1) |
43 | #define DMA_PTE_LARGE_PAGE BIT_ULL(7) |
44 | #define DMA_PTE_SNP BIT_ULL(11) |
45 | |
46 | #define DMA_FL_PTE_PRESENT BIT_ULL(0) |
47 | #define DMA_FL_PTE_US BIT_ULL(2) |
48 | #define DMA_FL_PTE_ACCESS BIT_ULL(5) |
49 | #define DMA_FL_PTE_DIRTY BIT_ULL(6) |
50 | #define DMA_FL_PTE_XD BIT_ULL(63) |
51 | |
52 | #define DMA_SL_PTE_DIRTY_BIT 9 |
53 | #define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) |
54 | |
55 | #define ADDR_WIDTH_5LEVEL (57) |
56 | #define ADDR_WIDTH_4LEVEL (48) |
57 | |
58 | #define CONTEXT_TT_MULTI_LEVEL 0 |
59 | #define CONTEXT_TT_DEV_IOTLB 1 |
60 | #define CONTEXT_TT_PASS_THROUGH 2 |
61 | #define CONTEXT_PASIDE BIT_ULL(3) |
62 | |
63 | /* |
64 | * Intel IOMMU register specification per version 1.0 public spec. |
65 | */ |
66 | #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ |
67 | #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ |
68 | #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ |
69 | #define DMAR_GCMD_REG 0x18 /* Global command register */ |
70 | #define DMAR_GSTS_REG 0x1c /* Global status register */ |
71 | #define DMAR_RTADDR_REG 0x20 /* Root entry table */ |
72 | #define DMAR_CCMD_REG 0x28 /* Context command reg */ |
73 | #define DMAR_FSTS_REG 0x34 /* Fault Status register */ |
74 | #define DMAR_FECTL_REG 0x38 /* Fault control register */ |
75 | #define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ |
76 | #define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ |
77 | #define DMAR_FEUADDR_REG 0x44 /* Upper address register */ |
78 | #define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ |
79 | #define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ |
80 | #define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ |
81 | #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ |
82 | #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ |
83 | #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ |
84 | #define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ |
85 | #define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ |
86 | #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ |
87 | #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ |
88 | #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ |
89 | #define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ |
90 | #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ |
91 | #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ |
92 | #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ |
93 | #define DMAR_PQA_REG 0xd0 /* Page request queue address register */ |
94 | #define DMAR_PRS_REG 0xdc /* Page request status register */ |
95 | #define DMAR_PECTL_REG 0xe0 /* Page request event control register */ |
96 | #define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ |
97 | #define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ |
98 | #define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ |
99 | #define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */ |
100 | #define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */ |
101 | #define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */ |
102 | #define DMAR_MTRR_FIX16K_80000_REG 0x128 |
103 | #define DMAR_MTRR_FIX16K_A0000_REG 0x130 |
104 | #define DMAR_MTRR_FIX4K_C0000_REG 0x138 |
105 | #define DMAR_MTRR_FIX4K_C8000_REG 0x140 |
106 | #define DMAR_MTRR_FIX4K_D0000_REG 0x148 |
107 | #define DMAR_MTRR_FIX4K_D8000_REG 0x150 |
108 | #define DMAR_MTRR_FIX4K_E0000_REG 0x158 |
109 | #define DMAR_MTRR_FIX4K_E8000_REG 0x160 |
110 | #define DMAR_MTRR_FIX4K_F0000_REG 0x168 |
111 | #define DMAR_MTRR_FIX4K_F8000_REG 0x170 |
112 | #define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */ |
113 | #define DMAR_MTRR_PHYSMASK0_REG 0x188 |
114 | #define DMAR_MTRR_PHYSBASE1_REG 0x190 |
115 | #define DMAR_MTRR_PHYSMASK1_REG 0x198 |
116 | #define DMAR_MTRR_PHYSBASE2_REG 0x1a0 |
117 | #define DMAR_MTRR_PHYSMASK2_REG 0x1a8 |
118 | #define DMAR_MTRR_PHYSBASE3_REG 0x1b0 |
119 | #define DMAR_MTRR_PHYSMASK3_REG 0x1b8 |
120 | #define DMAR_MTRR_PHYSBASE4_REG 0x1c0 |
121 | #define DMAR_MTRR_PHYSMASK4_REG 0x1c8 |
122 | #define DMAR_MTRR_PHYSBASE5_REG 0x1d0 |
123 | #define DMAR_MTRR_PHYSMASK5_REG 0x1d8 |
124 | #define DMAR_MTRR_PHYSBASE6_REG 0x1e0 |
125 | #define DMAR_MTRR_PHYSMASK6_REG 0x1e8 |
126 | #define DMAR_MTRR_PHYSBASE7_REG 0x1f0 |
127 | #define DMAR_MTRR_PHYSMASK7_REG 0x1f8 |
128 | #define DMAR_MTRR_PHYSBASE8_REG 0x200 |
129 | #define DMAR_MTRR_PHYSMASK8_REG 0x208 |
130 | #define DMAR_MTRR_PHYSBASE9_REG 0x210 |
131 | #define DMAR_MTRR_PHYSMASK9_REG 0x218 |
132 | #define DMAR_PERFCAP_REG 0x300 |
133 | #define DMAR_PERFCFGOFF_REG 0x310 |
134 | #define DMAR_PERFOVFOFF_REG 0x318 |
135 | #define DMAR_PERFCNTROFF_REG 0x31c |
136 | #define DMAR_PERFINTRSTS_REG 0x324 |
137 | #define DMAR_PERFINTRCTL_REG 0x328 |
138 | #define DMAR_PERFEVNTCAP_REG 0x380 |
139 | #define DMAR_ECMD_REG 0x400 |
140 | #define DMAR_ECEO_REG 0x408 |
141 | #define DMAR_ECRSP_REG 0x410 |
142 | #define DMAR_ECCAP_REG 0x430 |
143 | |
144 | #define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) |
145 | #define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) |
146 | #define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) |
147 | |
148 | #define OFFSET_STRIDE (9) |
149 | |
150 | #define dmar_readq(a) readq(a) |
151 | #define dmar_writeq(a,v) writeq(v,a) |
152 | #define dmar_readl(a) readl(a) |
153 | #define dmar_writel(a, v) writel(v, a) |
154 | |
155 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) |
156 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) |
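
/*
 * Example: a DMAR_VER_REG value of 0x10 decodes as major 1, minor 0,
 * i.e. a version 1.0 implementation.
 */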
157 | |
158 | /* |
159 | * Decoding Capability Register |
160 | */ |
161 | #define cap_esrtps(c) (((c) >> 63) & 1) |
162 | #define cap_esirtps(c) (((c) >> 62) & 1) |
163 | #define cap_ecmds(c) (((c) >> 61) & 1) |
164 | #define cap_fl5lp_support(c) (((c) >> 60) & 1) |
165 | #define cap_pi_support(c) (((c) >> 59) & 1) |
166 | #define cap_fl1gp_support(c) (((c) >> 56) & 1) |
167 | #define cap_read_drain(c) (((c) >> 55) & 1) |
168 | #define cap_write_drain(c) (((c) >> 54) & 1) |
169 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) |
170 | #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) |
171 | #define cap_pgsel_inv(c) (((c) >> 39) & 1) |
172 | |
173 | #define cap_super_page_val(c) (((c) >> 34) & 0xf) |
174 | #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ |
175 | * OFFSET_STRIDE) + 21) |
176 | |
177 | #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) |
178 | #define cap_max_fault_reg_offset(c) \ |
179 | (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) |
180 | |
181 | #define cap_zlr(c) (((c) >> 22) & 1) |
182 | #define cap_isoch(c) (((c) >> 23) & 1) |
183 | #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) |
184 | #define cap_sagaw(c) (((c) >> 8) & 0x1f) |
185 | #define cap_caching_mode(c) (((c) >> 7) & 1) |
186 | #define cap_phmr(c) (((c) >> 6) & 1) |
187 | #define cap_plmr(c) (((c) >> 5) & 1) |
188 | #define cap_rwbf(c) (((c) >> 4) & 1) |
189 | #define cap_afl(c) (((c) >> 3) & 1) |
190 | #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) |
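
/*
 * Worked example: the ND field in bits 2:0 encodes the number of domain
 * IDs as 1 << (4 + 2 * ND), so ND == 2 yields 256 domains and ND == 6
 * yields the architectural maximum of 65536 (domain IDs are 16 bits).
 */
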
191 | /* |
192 | * Extended Capability Register |
193 | */ |
194 | |
195 | #define ecap_pms(e) (((e) >> 51) & 0x1) |
196 | #define ecap_rps(e) (((e) >> 49) & 0x1) |
197 | #define ecap_smpwc(e) (((e) >> 48) & 0x1) |
198 | #define ecap_flts(e) (((e) >> 47) & 0x1) |
199 | #define ecap_slts(e) (((e) >> 46) & 0x1) |
200 | #define ecap_slads(e) (((e) >> 45) & 0x1) |
201 | #define ecap_smts(e) (((e) >> 43) & 0x1) |
202 | #define ecap_dit(e) (((e) >> 41) & 0x1) |
203 | #define ecap_pds(e) (((e) >> 42) & 0x1) |
204 | #define ecap_pasid(e) (((e) >> 40) & 0x1) |
205 | #define ecap_pss(e) (((e) >> 35) & 0x1f) |
206 | #define ecap_eafs(e) (((e) >> 34) & 0x1) |
207 | #define ecap_nwfs(e) (((e) >> 33) & 0x1) |
208 | #define ecap_srs(e) (((e) >> 31) & 0x1) |
209 | #define ecap_ers(e) (((e) >> 30) & 0x1) |
210 | #define ecap_prs(e) (((e) >> 29) & 0x1) |
211 | #define ecap_broken_pasid(e) (((e) >> 28) & 0x1) |
212 | #define ecap_dis(e) (((e) >> 27) & 0x1) |
213 | #define ecap_nest(e) (((e) >> 26) & 0x1) |
214 | #define ecap_mts(e) (((e) >> 25) & 0x1) |
215 | #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) |
216 | #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) |
217 | #define ecap_coherent(e) ((e) & 0x1) |
218 | #define ecap_qis(e) ((e) & 0x2) |
219 | #define ecap_pass_through(e) (((e) >> 6) & 0x1) |
220 | #define ecap_eim_support(e) (((e) >> 4) & 0x1) |
221 | #define ecap_ir_support(e) (((e) >> 3) & 0x1) |
222 | #define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) |
223 | #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf) |
224 | #define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */ |
225 | |
226 | /* |
227 | * Decoding Perf Capability Register |
228 | */ |
229 | #define pcap_num_cntr(p) ((p) & 0xffff) |
230 | #define pcap_cntr_width(p) (((p) >> 16) & 0x7f) |
231 | #define pcap_num_event_group(p) (((p) >> 24) & 0x1f) |
232 | #define pcap_filters_mask(p) (((p) >> 32) & 0x1f) |
233 | #define pcap_interrupt(p) (((p) >> 50) & 0x1) |
234 | /* The counter stride is calculated as 2 ^ (x+10) bytes */ |
235 | #define pcap_cntr_stride(p) (1ULL << ((((p) >> 52) & 0x7) + 10)) |
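
/*
 * Worked example: a stride field of 0 places counters 1 << 10 == 1024
 * bytes apart; a field of 2 gives a 4KiB stride.
 */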
236 | |
237 | /* |
238 | * Decoding Perf Event Capability Register |
239 | */ |
240 | #define pecap_es(p) ((p) & 0xfffffff) |
241 | |
242 | /* Virtual command interface capability */ |
243 | #define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ |
244 | |
245 | /* IOTLB_REG */ |
246 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 |
247 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) |
248 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) |
249 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) |
250 | #define DMA_TLB_IIRG(type) ((type >> 60) & 3) |
251 | #define DMA_TLB_IAIG(val) (((val) >> 57) & 3) |
252 | #define DMA_TLB_READ_DRAIN (((u64)1) << 49) |
253 | #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) |
254 | #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) |
255 | #define DMA_TLB_IVT (((u64)1) << 63) |
256 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) |
257 | #define DMA_TLB_MAX_SIZE (0x3f) |
258 | |
259 | /* INVALID_DESC */ |
260 | #define DMA_CCMD_INVL_GRANU_OFFSET 61 |
261 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) |
262 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) |
263 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) |
264 | #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) |
265 | #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) |
266 | #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) |
267 | #define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) |
268 | #define DMA_ID_TLB_ADDR(addr) (addr) |
269 | #define DMA_ID_TLB_ADDR_MASK(mask) (mask) |
270 | |
271 | /* PMEN_REG */ |
272 | #define DMA_PMEN_EPM (((u32)1)<<31) |
273 | #define DMA_PMEN_PRS (((u32)1)<<0) |
274 | |
275 | /* GCMD_REG */ |
276 | #define DMA_GCMD_TE (((u32)1) << 31) |
277 | #define DMA_GCMD_SRTP (((u32)1) << 30) |
278 | #define DMA_GCMD_SFL (((u32)1) << 29) |
279 | #define DMA_GCMD_EAFL (((u32)1) << 28) |
280 | #define DMA_GCMD_WBF (((u32)1) << 27) |
281 | #define DMA_GCMD_QIE (((u32)1) << 26) |
282 | #define DMA_GCMD_SIRTP (((u32)1) << 24) |
283 | #define DMA_GCMD_IRE (((u32) 1) << 25) |
284 | #define DMA_GCMD_CFI (((u32) 1) << 23) |
285 | |
286 | /* GSTS_REG */ |
287 | #define DMA_GSTS_TES (((u32)1) << 31) |
288 | #define DMA_GSTS_RTPS (((u32)1) << 30) |
289 | #define DMA_GSTS_FLS (((u32)1) << 29) |
290 | #define DMA_GSTS_AFLS (((u32)1) << 28) |
291 | #define DMA_GSTS_WBFS (((u32)1) << 27) |
292 | #define DMA_GSTS_QIES (((u32)1) << 26) |
293 | #define DMA_GSTS_IRTPS (((u32)1) << 24) |
294 | #define DMA_GSTS_IRES (((u32)1) << 25) |
295 | #define DMA_GSTS_CFIS (((u32)1) << 23) |
296 | |
297 | /* DMA_RTADDR_REG */ |
298 | #define DMA_RTADDR_SMT (((u64)1) << 10) |
299 | |
300 | /* CCMD_REG */ |
301 | #define DMA_CCMD_ICC (((u64)1) << 63) |
302 | #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) |
303 | #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) |
304 | #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) |
305 | #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) |
306 | #define DMA_CCMD_MASK_NOBIT 0 |
307 | #define DMA_CCMD_MASK_1BIT 1 |
308 | #define DMA_CCMD_MASK_2BIT 2 |
309 | #define DMA_CCMD_MASK_3BIT 3 |
310 | #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) |
311 | #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) |
312 | |
313 | /* ECMD_REG */ |
314 | #define DMA_MAX_NUM_ECMD 256 |
315 | #define DMA_MAX_NUM_ECMDCAP (DMA_MAX_NUM_ECMD / 64) |
316 | #define DMA_ECMD_REG_STEP 8 |
317 | #define DMA_ECMD_ENABLE 0xf0 |
318 | #define DMA_ECMD_DISABLE 0xf1 |
319 | #define DMA_ECMD_FREEZE 0xf4 |
320 | #define DMA_ECMD_UNFREEZE 0xf5 |
321 | #define DMA_ECMD_OA_SHIFT 16 |
322 | #define DMA_ECMD_ECRSP_IP 0x1 |
323 | #define DMA_ECMD_ECCAP3 3 |
324 | #define DMA_ECMD_ECCAP3_ECNTS BIT_ULL(48) |
325 | #define DMA_ECMD_ECCAP3_DCNTS BIT_ULL(49) |
326 | #define DMA_ECMD_ECCAP3_FCNTS BIT_ULL(52) |
327 | #define DMA_ECMD_ECCAP3_UFCNTS BIT_ULL(53) |
328 | #define DMA_ECMD_ECCAP3_ESSENTIAL (DMA_ECMD_ECCAP3_ECNTS | \ |
329 | DMA_ECMD_ECCAP3_DCNTS | \ |
330 | DMA_ECMD_ECCAP3_FCNTS | \ |
331 | DMA_ECMD_ECCAP3_UFCNTS) |
332 | |
333 | /* FECTL_REG */ |
334 | #define DMA_FECTL_IM (((u32)1) << 31) |
335 | |
336 | /* FSTS_REG */ |
337 | #define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ |
338 | #define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ |
339 | #define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ |
340 | #define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ |
341 | #define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ |
342 | #define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ |
343 | #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) |
344 | |
345 | /* FRCD_REG, 32 bits access */ |
346 | #define DMA_FRCD_F (((u32)1) << 31) |
347 | #define dma_frcd_type(d) ((d >> 30) & 1) |
348 | #define dma_frcd_fault_reason(c) (c & 0xff) |
349 | #define dma_frcd_source_id(c) (c & 0xffff) |
350 | #define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) |
351 | #define dma_frcd_pasid_present(c) (((c) >> 31) & 1) |
352 | /* low 64 bit */ |
353 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) |
354 | |
355 | /* PRS_REG */ |
356 | #define DMA_PRS_PPR ((u32)1) |
357 | #define DMA_PRS_PRO ((u32)2) |
358 | |
359 | #define DMA_VCS_PAS ((u64)1) |
360 | |
361 | /* PERFINTRSTS_REG */ |
362 | #define DMA_PERFINTRSTS_PIS ((u32)1) |
363 | |
364 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ |
365 | do { \ |
366 | cycles_t start_time = get_cycles(); \ |
367 | while (1) { \ |
368 | sts = op(iommu->reg + offset); \ |
369 | if (cond) \ |
370 | break; \ |
371 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ |
372 | panic("DMAR hardware is malfunctioning\n"); \ |
373 | cpu_relax(); \ |
374 | } \ |
375 | } while (0) |
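
/*
 * Typical usage, polling the global status register until hardware
 * reflects the translation-enable bit back (iommu and sts stand for
 * the caller's locals):
 *
 *	u32 sts;
 *
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */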
376 | |
377 | #define QI_LENGTH 256 /* queue length */ |
378 | |
379 | enum { |
380 | QI_FREE, |
381 | QI_IN_USE, |
382 | QI_DONE, |
383 | QI_ABORT |
384 | }; |
385 | |
386 | #define QI_CC_TYPE 0x1 |
387 | #define QI_IOTLB_TYPE 0x2 |
388 | #define QI_DIOTLB_TYPE 0x3 |
389 | #define QI_IEC_TYPE 0x4 |
390 | #define QI_IWD_TYPE 0x5 |
391 | #define QI_EIOTLB_TYPE 0x6 |
392 | #define QI_PC_TYPE 0x7 |
393 | #define QI_DEIOTLB_TYPE 0x8 |
394 | #define QI_PGRP_RESP_TYPE 0x9 |
395 | #define QI_PSTRM_RESP_TYPE 0xa |
396 | |
397 | #define QI_IEC_SELECTIVE (((u64)1) << 4) |
398 | #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) |
399 | #define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) |
400 | |
401 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) |
402 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) |
403 | #define QI_IWD_FENCE (((u64)1) << 6) |
404 | #define QI_IWD_PRQ_DRAIN (((u64)1) << 7) |
405 | |
406 | #define QI_IOTLB_DID(did) (((u64)did) << 16) |
407 | #define QI_IOTLB_DR(dr) (((u64)dr) << 7) |
408 | #define QI_IOTLB_DW(dw) (((u64)dw) << 6) |
409 | #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) |
410 | #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) |
411 | #define QI_IOTLB_IH(ih) (((u64)ih) << 6) |
412 | #define QI_IOTLB_AM(am) (((u8)am) & 0x3f) |
413 | |
414 | #define QI_CC_FM(fm) (((u64)fm) << 48) |
415 | #define QI_CC_SID(sid) (((u64)sid) << 32) |
416 | #define QI_CC_DID(did) (((u64)did) << 16) |
417 | #define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) |
418 | |
419 | #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) |
420 | #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) |
421 | #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) |
422 | #define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ |
423 | ((u64)((pfsid >> 4) & 0xfff) << 52)) |
424 | #define QI_DEV_IOTLB_SIZE 1 |
425 | #define QI_DEV_IOTLB_MAX_INVS 32 |
426 | |
427 | #define QI_PC_PASID(pasid) (((u64)pasid) << 32) |
428 | #define QI_PC_DID(did) (((u64)did) << 16) |
429 | #define QI_PC_GRAN(gran) (((u64)gran) << 4) |
430 | |
431 | /* PASID cache invalidation granu */ |
432 | #define QI_PC_ALL_PASIDS 0 |
433 | #define QI_PC_PASID_SEL 1 |
434 | #define QI_PC_GLOBAL 3 |
435 | |
436 | #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) |
437 | #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) |
438 | #define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) |
439 | #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) |
440 | #define QI_EIOTLB_DID(did) (((u64)did) << 16) |
441 | #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) |
442 | |
443 | /* QI Dev-IOTLB inv granu */ |
444 | #define QI_DEV_IOTLB_GRAN_ALL 1 |
445 | #define QI_DEV_IOTLB_GRAN_PASID_SEL 0 |
446 | |
447 | #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) |
448 | #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) |
449 | #define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) |
450 | #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) |
451 | #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) |
452 | #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ |
453 | ((u64)((pfsid >> 4) & 0xfff) << 52)) |
454 | #define QI_DEV_EIOTLB_MAX_INVS 32 |
455 | |
456 | /* Page group response descriptor QW0 */ |
457 | #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) |
458 | #define QI_PGRP_PDP(p) (((u64)(p)) << 5) |
459 | #define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) |
460 | #define QI_PGRP_DID(rid) (((u64)(rid)) << 16) |
461 | #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) |
462 | |
463 | /* Page group response descriptor QW1 */ |
464 | #define QI_PGRP_LPIG(x) (((u64)(x)) << 2) |
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)

468 | #define QI_RESP_SUCCESS 0x0 |
469 | #define QI_RESP_INVALID 0x1 |
470 | #define QI_RESP_FAILURE 0xf |
471 | |
472 | #define QI_GRAN_NONG_PASID 2 |
473 | #define QI_GRAN_PSI_PASID 3 |
474 | |
475 | #define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap)) |
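
/*
 * A queue slot is therefore 1 << 4 == 16 bytes (qw0/qw1 only) in legacy
 * mode, and 1 << 5 == 32 bytes in scalable mode, where descriptors also
 * carry qw2/qw3.
 */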
476 | |
477 | struct qi_desc { |
478 | u64 qw0; |
479 | u64 qw1; |
480 | u64 qw2; |
481 | u64 qw3; |
482 | }; |
483 | |
484 | struct q_inval { |
485 | raw_spinlock_t q_lock; |
486 | void *desc; /* invalidation queue */ |
487 | int *desc_status; /* desc status */ |
488 | int free_head; /* first free entry */ |
489 | int free_tail; /* last free entry */ |
490 | int free_cnt; |
491 | }; |
492 | |
493 | /* Page Request Queue depth */ |
494 | #define PRQ_ORDER 4 |
495 | #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) |
496 | #define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) |
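
/*
 * Worked example: with PRQ_ORDER == 4 the ring is 0x1000 << 4 == 64KiB.
 * Each page request descriptor is 32 bytes, so PRQ_DEPTH == 64KiB >> 5 ==
 * 2048 entries, and PRQ_RING_MASK (0xffe0) wraps a 32-byte-aligned queue
 * offset around the ring.
 */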
497 | |
498 | struct dmar_pci_notify_info; |
499 | |
500 | #ifdef CONFIG_IRQ_REMAP |
501 | /* 1MB - maximum possible interrupt remapping table size */ |
502 | #define INTR_REMAP_PAGE_ORDER 8 |
503 | #define INTR_REMAP_TABLE_REG_SIZE 0xf |
504 | #define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf |
505 | |
506 | #define INTR_REMAP_TABLE_ENTRIES 65536 |
507 | |
508 | struct irq_domain; |
509 | |
510 | struct ir_table { |
511 | struct irte *base; |
512 | unsigned long *bitmap; |
513 | }; |
514 | |
515 | void intel_irq_remap_add_device(struct dmar_pci_notify_info *info); |
516 | #else |
517 | static inline void |
518 | intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { } |
519 | #endif |
520 | |
521 | struct iommu_flush { |
522 | void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, |
523 | u8 fm, u64 type); |
524 | void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, |
525 | unsigned int size_order, u64 type); |
526 | }; |
527 | |
528 | enum { |
529 | SR_DMAR_FECTL_REG, |
530 | SR_DMAR_FEDATA_REG, |
531 | SR_DMAR_FEADDR_REG, |
532 | SR_DMAR_FEUADDR_REG, |
533 | MAX_SR_DMAR_REGS |
534 | }; |
535 | |
536 | #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) |
537 | #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) |
538 | #define VTD_FLAG_SVM_CAPABLE (1 << 2) |
539 | |
540 | #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) |
541 | #define pasid_supported(iommu) (sm_supported(iommu) && \ |
542 | ecap_pasid((iommu)->ecap)) |
543 | #define ssads_supported(iommu) (sm_supported(iommu) && \ |
544 | ecap_slads((iommu)->ecap)) |
545 | #define nested_supported(iommu) (sm_supported(iommu) && \ |
546 | ecap_nest((iommu)->ecap)) |
547 | |
548 | struct pasid_entry; |
549 | struct pasid_state_entry; |
550 | struct page_req_dsc; |
551 | |
552 | /* |
553 | * 0: Present |
554 | * 1-11: Reserved |
555 | * 12-63: Context Ptr (12 - (haw-1)) |
556 | * 64-127: Reserved |
557 | */ |
558 | struct root_entry { |
559 | u64 lo; |
560 | u64 hi; |
561 | }; |
562 | |
563 | /* |
564 | * low 64 bits: |
565 | * 0: present |
566 | * 1: fault processing disable |
567 | * 2-3: translation type |
568 | * 12-63: address space root |
569 | * high 64 bits: |
570 | * 0-2: address width |
571 | * 3-6: aval |
572 | * 8-23: domain id |
573 | */ |
574 | struct context_entry { |
575 | u64 lo; |
576 | u64 hi; |
577 | }; |
578 | |
579 | struct iommu_domain_info { |
580 | struct intel_iommu *iommu; |
581 | unsigned int refcnt; /* Refcount of devices per iommu */ |
582 | u16 did; /* Domain ids per IOMMU. Use u16 since |
583 | * domain ids are 16 bit wide according |
584 | * to VT-d spec, section 9.3 */ |
585 | }; |
586 | |
587 | struct dmar_domain { |
588 | int nid; /* node id */ |
589 | struct xarray iommu_array; /* Attached IOMMU array */ |
590 | |
591 | u8 has_iotlb_device: 1; |
592 | u8 iommu_coherency: 1; /* indicate coherency of iommu access */ |
593 | u8 force_snooping : 1; /* Create IOPTEs with snoop control */ |
594 | u8 set_pte_snp:1; |
595 | u8 use_first_level:1; /* DMA translation for the domain goes |
596 | * through the first level page table, |
597 | * otherwise, goes through the second |
598 | * level. |
599 | */ |
600 | u8 dirty_tracking:1; /* Dirty tracking is enabled */ |
601 | u8 nested_parent:1; /* Has other domains nested on it */ |
602 | u8 has_mappings:1; /* Has mappings configured through |
603 | * iommu_map() interface. |
604 | */ |
605 | |
606 | spinlock_t lock; /* Protect device tracking lists */ |
607 | struct list_head devices; /* all devices' list */ |
608 | struct list_head dev_pasids; /* all attached pasids */ |
609 | |
610 | int iommu_superpage;/* Level of superpages supported: |
611 | 0 == 4KiB (no superpages), 1 == 2MiB, |
612 | 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ |
613 | union { |
614 | /* DMA remapping domain */ |
615 | struct { |
616 | /* virtual address */ |
617 | struct dma_pte *pgd; |
618 | /* max guest address width */ |
619 | int gaw; |
620 | /* |
621 | * adjusted guest address width: |
622 | * 0: level 2 30-bit |
623 | * 1: level 3 39-bit |
624 | * 2: level 4 48-bit |
625 | * 3: level 5 57-bit |
626 | */ |
627 | int agaw; |
628 | /* maximum mapped address */ |
629 | u64 max_addr; |
630 | /* Protect the s1_domains list */ |
631 | spinlock_t s1_lock; |
632 | /* Track s1_domains nested on this domain */ |
633 | struct list_head s1_domains; |
634 | }; |
635 | |
636 | /* Nested user domain */ |
637 | struct { |
638 | /* parent page table which the user domain is nested on */ |
639 | struct dmar_domain *s2_domain; |
640 | /* user page table pointer (in GPA) */ |
641 | unsigned long s1_pgtbl; |
642 | /* page table attributes */ |
643 | struct iommu_hwpt_vtd_s1 s1_cfg; |
644 | /* link to parent domain siblings */ |
645 | struct list_head s2_link; |
646 | }; |
647 | }; |
648 | |
649 | struct iommu_domain domain; /* generic domain data structure for |
650 | iommu core */ |
651 | }; |
652 | |
/*
 * In theory, the VT-d 4.0 spec can support up to 2^16 counters, but
 * existing platforms implement only 14. Setting the max number of
 * counters to 64 should be good enough for a long time. Supporting
 * more than 64 counters would also require extras such as additional
 * freeze and overflow registers, which is not necessary for now.
 */
661 | #define IOMMU_PMU_IDX_MAX 64 |
662 | |
663 | struct iommu_pmu { |
664 | struct intel_iommu *iommu; |
665 | u32 num_cntr; /* Number of counters */ |
666 | u32 num_eg; /* Number of event group */ |
667 | u32 cntr_width; /* Counter width */ |
668 | u32 cntr_stride; /* Counter Stride */ |
669 | u32 filter; /* Bitmask of filter support */ |
670 | void __iomem *base; /* the PerfMon base address */ |
671 | void __iomem *cfg_reg; /* counter configuration base address */ |
672 | void __iomem *cntr_reg; /* counter 0 address*/ |
673 | void __iomem *overflow; /* overflow status register */ |
674 | |
675 | u64 *evcap; /* Indicates all supported events */ |
676 | u32 **cntr_evcap; /* Supported events of each counter. */ |
677 | |
678 | struct pmu pmu; |
679 | DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX); |
680 | struct perf_event *event_list[IOMMU_PMU_IDX_MAX]; |
681 | unsigned char irq_name[16]; |
682 | struct hlist_node cpuhp_node; |
683 | int cpu; |
684 | }; |
685 | |
686 | #define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED) |
687 | #define IOMMU_IRQ_ID_OFFSET_PERF (2 * DMAR_UNITS_SUPPORTED) |
688 | |
689 | struct intel_iommu { |
690 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
691 | u64 reg_phys; /* physical address of hw register set */ |
692 | u64 reg_size; /* size of hw register set */ |
693 | u64 cap; |
694 | u64 ecap; |
695 | u64 vccap; |
696 | u64 ecmdcap[DMA_MAX_NUM_ECMDCAP]; |
697 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ |
698 | raw_spinlock_t register_lock; /* protect register handling */ |
699 | int seq_id; /* sequence id of the iommu */ |
700 | int agaw; /* agaw of this iommu */ |
701 | int msagaw; /* max sagaw of this iommu */ |
702 | unsigned int irq, pr_irq, perf_irq; |
703 | u16 segment; /* PCI segment# */ |
704 | unsigned char name[13]; /* Device Name */ |
705 | |
706 | #ifdef CONFIG_INTEL_IOMMU |
707 | unsigned long *domain_ids; /* bitmap of domains */ |
708 | unsigned long *copied_tables; /* bitmap of copied tables */ |
709 | spinlock_t lock; /* protect context, domain ids */ |
710 | struct root_entry *root_entry; /* virtual address */ |
711 | |
712 | struct iommu_flush flush; |
713 | #endif |
714 | #ifdef CONFIG_INTEL_IOMMU_SVM |
715 | struct page_req_dsc *prq; |
716 | unsigned char prq_name[16]; /* Name for PRQ interrupt */ |
717 | unsigned long prq_seq_number; |
718 | struct completion prq_complete; |
719 | #endif |
720 | struct iopf_queue *iopf_queue; |
721 | unsigned char iopfq_name[16]; |
722 | /* Synchronization between fault report and iommu device release. */ |
723 | struct mutex iopf_lock; |
724 | struct q_inval *qi; /* Queued invalidation info */ |
725 | u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/ |
726 | |
727 | /* rb tree for all probed devices */ |
728 | struct rb_root device_rbtree; |
729 | /* protect the device_rbtree */ |
730 | spinlock_t device_rbtree_lock; |
731 | |
732 | #ifdef CONFIG_IRQ_REMAP |
733 | struct ir_table *ir_table; /* Interrupt remapping info */ |
734 | struct irq_domain *ir_domain; |
735 | #endif |
736 | struct iommu_device iommu; /* IOMMU core code handle */ |
737 | int node; |
738 | u32 flags; /* Software defined flags */ |
739 | |
740 | struct dmar_drhd_unit *drhd; |
741 | void *perf_statistic; |
742 | |
743 | struct iommu_pmu *pmu; |
744 | }; |
745 | |
746 | /* PCI domain-device relationship */ |
747 | struct device_domain_info { |
748 | struct list_head link; /* link to domain siblings */ |
749 | u32 segment; /* PCI segment number */ |
750 | u8 bus; /* PCI bus number */ |
751 | u8 devfn; /* PCI devfn number */ |
752 | u16 pfsid; /* SRIOV physical function source ID */ |
753 | u8 pasid_supported:3; |
754 | u8 pasid_enabled:1; |
755 | u8 pri_supported:1; |
756 | u8 pri_enabled:1; |
757 | u8 ats_supported:1; |
758 | u8 ats_enabled:1; |
759 | u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */ |
760 | u8 ats_qdep; |
761 | struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ |
762 | struct intel_iommu *iommu; /* IOMMU used by this device */ |
763 | struct dmar_domain *domain; /* pointer to domain */ |
764 | struct pasid_table *pasid_table; /* pasid table */ |
765 | /* device tracking node(lookup by PCI RID) */ |
766 | struct rb_node node; |
767 | #ifdef CONFIG_INTEL_IOMMU_DEBUGFS |
768 | struct dentry *debugfs_dentry; /* pointer to device directory dentry */ |
769 | #endif |
770 | }; |
771 | |
772 | struct dev_pasid_info { |
773 | struct list_head link_domain; /* link to domain siblings */ |
774 | struct device *dev; |
775 | ioasid_t pasid; |
776 | #ifdef CONFIG_INTEL_IOMMU_DEBUGFS |
777 | struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */ |
778 | #endif |
779 | }; |
780 | |
781 | static inline void __iommu_flush_cache( |
782 | struct intel_iommu *iommu, void *addr, int size) |
783 | { |
784 | if (!ecap_coherent(iommu->ecap)) |
785 | clflush_cache_range(addr, size); |
786 | } |
787 | |
788 | /* Convert generic struct iommu_domain to private struct dmar_domain */ |
789 | static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) |
790 | { |
791 | return container_of(dom, struct dmar_domain, domain); |
792 | } |
793 | |
/* Retrieve the domain ID allocated to the domain on this IOMMU */
795 | static inline u16 |
796 | domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) |
797 | { |
798 | struct iommu_domain_info *info = |
		xa_load(&domain->iommu_array, iommu->seq_id);
800 | |
801 | return info->did; |
802 | } |
803 | |
804 | /* |
805 | * 0: readable |
806 | * 1: writable |
807 | * 2-6: reserved |
808 | * 7: super page |
809 | * 8-10: available |
810 | * 11: snoop behavior |
811 | * 12-63: Host physical address |
812 | */ |
813 | struct dma_pte { |
814 | u64 val; |
815 | }; |
816 | |
817 | static inline void dma_clear_pte(struct dma_pte *pte) |
818 | { |
819 | pte->val = 0; |
820 | } |
821 | |
822 | static inline u64 dma_pte_addr(struct dma_pte *pte) |
823 | { |
824 | #ifdef CONFIG_64BIT |
825 | return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD); |
826 | #else |
827 | /* Must have a full atomic 64-bit read */ |
828 | return __cmpxchg64(&pte->val, 0ULL, 0ULL) & |
829 | VTD_PAGE_MASK & (~DMA_FL_PTE_XD); |
830 | #endif |
831 | } |
832 | |
833 | static inline bool dma_pte_present(struct dma_pte *pte) |
834 | { |
835 | return (pte->val & 3) != 0; |
836 | } |
837 | |
838 | static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte, |
839 | unsigned long flags) |
840 | { |
841 | if (flags & IOMMU_DIRTY_NO_CLEAR) |
842 | return (pte->val & DMA_SL_PTE_DIRTY) != 0; |
843 | |
844 | return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT, |
				  (unsigned long *)&pte->val);
846 | } |
847 | |
848 | static inline bool dma_pte_superpage(struct dma_pte *pte) |
849 | { |
850 | return (pte->val & DMA_PTE_LARGE_PAGE); |
851 | } |
852 | |
853 | static inline bool first_pte_in_page(struct dma_pte *pte) |
854 | { |
855 | return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE); |
856 | } |
857 | |
858 | static inline int nr_pte_to_next_page(struct dma_pte *pte) |
859 | { |
860 | return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) : |
861 | (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte; |
862 | } |
863 | |
864 | static inline bool context_present(struct context_entry *context) |
865 | { |
866 | return (context->lo & 1); |
867 | } |
868 | |
869 | #define LEVEL_STRIDE (9) |
870 | #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) |
871 | #define MAX_AGAW_WIDTH (64) |
872 | #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT) |
873 | |
874 | static inline int agaw_to_level(int agaw) |
875 | { |
876 | return agaw + 2; |
877 | } |
878 | |
879 | static inline int agaw_to_width(int agaw) |
880 | { |
881 | return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH); |
882 | } |
883 | |
884 | static inline int width_to_agaw(int width) |
885 | { |
886 | return DIV_ROUND_UP(width - 30, LEVEL_STRIDE); |
887 | } |
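
/*
 * Worked example: a 48-bit width gives width_to_agaw(48) ==
 * DIV_ROUND_UP(18, 9) == 2, which agaw_to_level() maps to a 4-level
 * table and agaw_to_width() maps back to 48 bits.
 */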
888 | |
889 | static inline unsigned int level_to_offset_bits(int level) |
890 | { |
891 | return (level - 1) * LEVEL_STRIDE; |
892 | } |
893 | |
894 | static inline int pfn_level_offset(u64 pfn, int level) |
895 | { |
896 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; |
897 | } |
898 | |
899 | static inline u64 level_mask(int level) |
900 | { |
901 | return -1ULL << level_to_offset_bits(level); |
902 | } |
903 | |
904 | static inline u64 level_size(int level) |
905 | { |
906 | return 1ULL << level_to_offset_bits(level); |
907 | } |
908 | |
909 | static inline u64 align_to_level(u64 pfn, int level) |
910 | { |
911 | return (pfn + level_size(level) - 1) & level_mask(level); |
912 | } |
913 | |
914 | static inline unsigned long lvl_to_nr_pages(unsigned int lvl) |
915 | { |
916 | return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); |
917 | } |
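
/*
 * Worked example: at level 2 the offset bits start at bit 9, so
 * level_size(2) covers 512 4KiB pages (2MiB of IOVA) and
 * align_to_level(0x201, 2) rounds pfn 0x201 up to 0x400.
 */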
918 | |
/* VT-d pages must never be _larger_ than MM pages. Otherwise things
   are never going to work. */
921 | static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn) |
922 | { |
923 | return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); |
924 | } |
925 | static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn) |
926 | { |
927 | return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1; |
928 | } |
929 | static inline unsigned long page_to_dma_pfn(struct page *pg) |
930 | { |
931 | return mm_to_dma_pfn_start(page_to_pfn(pg)); |
932 | } |
933 | static inline unsigned long virt_to_dma_pfn(void *p) |
934 | { |
935 | return page_to_dma_pfn(virt_to_page(p)); |
936 | } |
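
/*
 * Example: with 4KiB kernel pages (PAGE_SHIFT == 12) the conversions
 * above are the identity; with 64KiB kernel pages (PAGE_SHIFT == 16)
 * mm pfn 1 spans VT-d pfns 16 through 31.
 */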
937 | |
938 | static inline void context_set_present(struct context_entry *context) |
939 | { |
940 | context->lo |= 1; |
941 | } |
942 | |
943 | static inline void context_set_fault_enable(struct context_entry *context) |
944 | { |
945 | context->lo &= (((u64)-1) << 2) | 1; |
946 | } |
947 | |
948 | static inline void context_set_translation_type(struct context_entry *context, |
949 | unsigned long value) |
950 | { |
951 | context->lo &= (((u64)-1) << 4) | 3; |
952 | context->lo |= (value & 3) << 2; |
953 | } |
954 | |
955 | static inline void context_set_address_root(struct context_entry *context, |
956 | unsigned long value) |
957 | { |
958 | context->lo &= ~VTD_PAGE_MASK; |
959 | context->lo |= value & VTD_PAGE_MASK; |
960 | } |
961 | |
962 | static inline void context_set_address_width(struct context_entry *context, |
963 | unsigned long value) |
964 | { |
965 | context->hi |= value & 7; |
966 | } |
967 | |
968 | static inline void context_set_domain_id(struct context_entry *context, |
969 | unsigned long value) |
970 | { |
971 | context->hi |= (value & ((1 << 16) - 1)) << 8; |
972 | } |
973 | |
974 | static inline void context_set_pasid(struct context_entry *context) |
975 | { |
976 | context->lo |= CONTEXT_PASIDE; |
977 | } |
978 | |
979 | static inline int context_domain_id(struct context_entry *c) |
980 | { |
981 | return((c->hi >> 8) & 0xffff); |
982 | } |
983 | |
984 | static inline void context_clear_entry(struct context_entry *context) |
985 | { |
986 | context->lo = 0; |
987 | context->hi = 0; |
988 | } |
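
/*
 * A minimal sketch of how the setters above compose a legacy-mode
 * context entry; did, agaw and pgd stand for the caller's domain ID,
 * address width and page-table root, and error handling is omitted:
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_width(context, agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */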
989 | |
990 | #ifdef CONFIG_INTEL_IOMMU |
991 | static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) |
992 | { |
993 | if (!iommu->copied_tables) |
994 | return false; |
995 | |
996 | return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); |
997 | } |
998 | |
999 | static inline void |
1000 | set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) |
1001 | { |
	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1003 | } |
1004 | |
1005 | static inline void |
1006 | clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) |
1007 | { |
	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1009 | } |
1010 | #endif /* CONFIG_INTEL_IOMMU */ |
1011 | |
/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * translating DMA requests that arrive without a PASID.
 */
1017 | static inline void |
1018 | context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid) |
1019 | { |
1020 | context->hi |= pasid & ((1 << 20) - 1); |
1021 | } |
1022 | |
1023 | /* |
1024 | * Set the DTE(Device-TLB Enable) field of a scalable mode context |
1025 | * entry. |
1026 | */ |
1027 | static inline void context_set_sm_dte(struct context_entry *context) |
1028 | { |
1029 | context->lo |= BIT_ULL(2); |
1030 | } |
1031 | |
1032 | /* |
1033 | * Set the PRE(Page Request Enable) field of a scalable mode context |
1034 | * entry. |
1035 | */ |
1036 | static inline void context_set_sm_pre(struct context_entry *context) |
1037 | { |
1038 | context->lo |= BIT_ULL(4); |
1039 | } |
1040 | |
1041 | /* Convert value to context PASID directory size field coding. */ |
1042 | #define context_pdts(pds) (((pds) & 0x7) << 9) |
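
/*
 * A PDTS value of X indicates a PASID directory with 2^(X + 7) entries;
 * context_pdts(0), for example, encodes a 128-entry directory.
 */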
1043 | |
1044 | struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev); |
1045 | |
1046 | int dmar_enable_qi(struct intel_iommu *iommu); |
1047 | void dmar_disable_qi(struct intel_iommu *iommu); |
1048 | int dmar_reenable_qi(struct intel_iommu *iommu); |
1049 | void qi_global_iec(struct intel_iommu *iommu); |
1050 | |
1051 | void qi_flush_context(struct intel_iommu *iommu, u16 did, |
1052 | u16 sid, u8 fm, u64 type); |
1053 | void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, |
1054 | unsigned int size_order, u64 type); |
1055 | void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, |
1056 | u16 qdep, u64 addr, unsigned mask); |
1057 | |
1058 | void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, |
1059 | unsigned long npages, bool ih); |
1060 | |
1061 | void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, |
1062 | u32 pasid, u16 qdep, u64 addr, |
1063 | unsigned int size_order); |
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long pages,
			       u32 pasid, u16 qdep);
1067 | void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, |
1068 | u32 pasid); |
1069 | |
1070 | int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, |
1071 | unsigned int count, unsigned long options); |
1072 | /* |
1073 | * Options used in qi_submit_sync: |
1074 | * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. |
1075 | */ |
1076 | #define QI_OPT_WAIT_DRAIN BIT(0) |
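
/*
 * A minimal sketch of submitting a single descriptor, here a
 * domain-selective IOTLB invalidation for domain ID did, following the
 * shape of qi_flush_iotlb() with error handling omitted:
 *
 *	struct qi_desc desc = {};
 *
 *	desc.qw0 = QI_IOTLB_DID(did) |
 *		   QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */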
1077 | |
1078 | void domain_update_iotlb(struct dmar_domain *domain); |
1079 | int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); |
1080 | void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); |
1081 | void device_block_translation(struct device *dev); |
1082 | int prepare_domain_attach_device(struct iommu_domain *domain, |
1083 | struct device *dev); |
1084 | void domain_update_iommu_cap(struct dmar_domain *domain); |
1085 | |
1086 | int dmar_ir_support(void); |
1087 | |
1088 | void *alloc_pgtable_page(int node, gfp_t gfp); |
1089 | void free_pgtable_page(void *vaddr); |
1090 | void iommu_flush_write_buffer(struct intel_iommu *iommu); |
1091 | struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, |
1092 | const struct iommu_user_data *user_data); |
1093 | struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid); |
1094 | |
1095 | #ifdef CONFIG_INTEL_IOMMU_SVM |
1096 | void intel_svm_check(struct intel_iommu *iommu); |
1097 | int intel_svm_enable_prq(struct intel_iommu *iommu); |
1098 | int intel_svm_finish_prq(struct intel_iommu *iommu); |
1099 | void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, |
1100 | struct iommu_page_response *msg); |
1101 | struct iommu_domain *intel_svm_domain_alloc(void); |
1102 | void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid); |
1103 | void intel_drain_pasid_prq(struct device *dev, u32 pasid); |
1104 | |
1105 | struct intel_svm_dev { |
1106 | struct list_head list; |
1107 | struct rcu_head rcu; |
1108 | struct device *dev; |
1109 | struct intel_iommu *iommu; |
1110 | u16 did; |
1111 | u16 sid, qdep; |
1112 | }; |
1113 | |
1114 | struct intel_svm { |
1115 | struct mmu_notifier notifier; |
1116 | struct mm_struct *mm; |
1117 | u32 pasid; |
1118 | struct list_head devs; |
1119 | }; |
1120 | #else |
1121 | static inline void intel_svm_check(struct intel_iommu *iommu) {} |
1122 | static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {} |
1123 | static inline struct iommu_domain *intel_svm_domain_alloc(void) |
1124 | { |
1125 | return NULL; |
1126 | } |
1127 | |
1128 | static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid) |
1129 | { |
1130 | } |
1131 | #endif |
1132 | |
1133 | #ifdef CONFIG_INTEL_IOMMU_DEBUGFS |
1134 | void intel_iommu_debugfs_init(void); |
1135 | void intel_iommu_debugfs_create_dev(struct device_domain_info *info); |
1136 | void intel_iommu_debugfs_remove_dev(struct device_domain_info *info); |
1137 | void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid); |
1138 | void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid); |
1139 | #else |
1140 | static inline void intel_iommu_debugfs_init(void) {} |
1141 | static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {} |
1142 | static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {} |
1143 | static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {} |
1144 | static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {} |
1145 | #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ |
1146 | |
1147 | extern const struct attribute_group *intel_iommu_groups[]; |
1148 | struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, |
1149 | u8 devfn, int alloc); |
1150 | |
1151 | extern const struct iommu_ops intel_iommu_ops; |
1152 | |
1153 | #ifdef CONFIG_INTEL_IOMMU |
1154 | extern int intel_iommu_sm; |
1155 | int iommu_calculate_agaw(struct intel_iommu *iommu); |
1156 | int iommu_calculate_max_sagaw(struct intel_iommu *iommu); |
1157 | int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob); |
1158 | |
1159 | static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu) |
1160 | { |
1161 | return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) == |
1162 | DMA_ECMD_ECCAP3_ESSENTIAL; |
1163 | } |
1164 | |
1165 | extern int dmar_disabled; |
1166 | extern int intel_iommu_enabled; |
1167 | #else |
1168 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) |
1169 | { |
1170 | return 0; |
1171 | } |
1172 | static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) |
1173 | { |
1174 | return 0; |
1175 | } |
1176 | #define dmar_disabled (1) |
1177 | #define intel_iommu_enabled (0) |
1178 | #define intel_iommu_sm (0) |
1179 | #endif |
1180 | |
1181 | static inline const char *decode_prq_descriptor(char *str, size_t size, |
1182 | u64 dw0, u64 dw1, u64 dw2, u64 dw3) |
1183 | { |
1184 | char *buf = str; |
1185 | int bytes; |
1186 | |
1187 | bytes = snprintf(buf, size, |
			 "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
1189 | FIELD_GET(GENMASK_ULL(31, 16), dw0), |
1190 | FIELD_GET(GENMASK_ULL(63, 12), dw1), |
1191 | dw1 & BIT_ULL(0) ? 'r' : '-', |
1192 | dw1 & BIT_ULL(1) ? 'w' : '-', |
1193 | dw0 & BIT_ULL(52) ? 'x' : '-', |
1194 | dw0 & BIT_ULL(53) ? 'p' : '-', |
1195 | dw1 & BIT_ULL(2) ? 'l' : '-', |
1196 | FIELD_GET(GENMASK_ULL(51, 32), dw0), |
1197 | FIELD_GET(GENMASK_ULL(11, 3), dw1)); |
1198 | |
1199 | /* Private Data */ |
1200 | if (dw0 & BIT_ULL(9)) { |
1201 | size -= bytes; |
1202 | buf += bytes; |
		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
1204 | } |
1205 | |
1206 | return str; |
1207 | } |
1208 | |
1209 | #endif |
1210 | |