/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * David Woodhouse <David.Woodhouse@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT (12)
#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
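
/*
 * Illustrative sketch only (the helper name is hypothetical and not part of
 * the upstream header): how the page macros above combine to compute the
 * number of 4KiB VT-d pages needed to map @size bytes starting at bus
 * address @addr.
 */
static inline unsigned long vtd_example_nr_pages(u64 addr, u64 size)
{
	/* Round the end up and the start down to VT-d page boundaries. */
	return (VTD_PAGE_ALIGN(addr + size) -
		(addr & VTD_PAGE_MASK)) >> VTD_PAGE_SHIFT;
}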

#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)

#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
#define CONTEXT_PASIDE BIT_ULL(3)

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG 0x18 /* Global command register */
#define DMAR_GSTS_REG 0x1c /* Global status register */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_CCMD_REG 0x28 /* Context command reg */
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
#define DMAR_FECTL_REG 0x38 /* Fault control register */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
#define DMAR_PRS_REG 0xdc /* Page request status register */
#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */

#define OFFSET_STRIDE (9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)

#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
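
/*
 * Example (illustrative): a DMAR_VER_REG value of 0x10 decodes as
 * DMAR_VER_MAJOR() == 1 and DMAR_VER_MINOR() == 0, i.e. a version 1.0
 * implementation of the register architecture.
 */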

/*
 * Decoding Capability Register
 */
#define cap_5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c) (((c) >> 39) & 1)

#define cap_super_page_val(c) (((c) >> 34) & 0xf)
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
				* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c) (((c) >> 22) & 1)
#define cap_isoch(c) (((c) >> 23) & 1)
#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c) (((c) >> 8) & 0x1f)
#define cap_caching_mode(c) (((c) >> 7) & 1)
#define cap_phmr(c) (((c) >> 6) & 1)
#define cap_plmr(c) (((c) >> 5) & 1)
#define cap_rwbf(c) (((c) >> 4) & 1)
#define cap_afl(c) (((c) >> 3) & 1)
#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
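
/*
 * Example (illustrative sketch) of decoding a raw capability value with the
 * helpers above.  The driver normally caches the raw register in
 * struct intel_iommu::cap at probe time instead of re-reading the MMIO
 * register each time:
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *
 *	if (cap_sagaw(cap) & (1 << 2))
 *		;	// 4-level (48-bit AGAW) second-level tables supported
 *	if (cap_caching_mode(cap))
 *		;	// hardware may cache non-present entries; flush after
 *		;	// every mapping change, not just unmaps
 */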
/*
 * Extended Capability Register
 */

#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
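
/*
 * Illustrative, simplified sketch of how these extended-capability checks
 * gate optional features (the real setup paths do considerably more work):
 *
 *	if (ecap_qis(iommu->ecap))
 *		dmar_enable_qi(iommu);	// queued invalidation available
 *	if (ecap_ir_support(iommu->ecap))
 *		;			// interrupt remapping may be enabled
 */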

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr) (addr)
#define DMA_ID_TLB_ADDR_MASK(mask) (mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
	cycles_t start_time = get_cycles(); \
	while (1) { \
		sts = op(iommu->reg + offset); \
		if (cond) \
			break; \
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time)) \
			panic("DMAR hardware is malfunctioning\n"); \
		cpu_relax(); \
	} \
} while (0)
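
/*
 * Illustrative use of IOMMU_WAIT_OP() (sketch based on how the driver polls
 * status bits; the real code does this while holding register_lock): after
 * setting a bit in the global command register, spin until the matching
 * global status bit is reflected back by hardware:
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */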

#define QI_LENGTH 256 /* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE 0x1
#define QI_IOTLB_TYPE 0x2
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
#define QI_EIOTLB_TYPE 0x6
#define QI_PC_TYPE 0x7
#define QI_DEIOTLB_TYPE 0x8
#define QI_PGRP_RESP_TYPE 0x9
#define QI_PSTRM_RESP_TYPE 0xa

#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)

#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
#define QI_IOTLB_AM(am) (((u8)am))

#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32

#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)

#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))

#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)

#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)

#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf

#define QI_GRAN_ALL_ALL 0
#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3

#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};
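
/*
 * Illustrative sketch of filling a qi_desc for a domain-selective IOTLB
 * invalidation with the QI_IOTLB_* helpers above (roughly what
 * qi_flush_iotlb() does; qw2/qw3 are only consumed by the 256-bit
 * descriptors used in scalable mode):
 *
 *	struct qi_desc desc;
 *
 *	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(1) | QI_IOTLB_DW(1) |
 *		   QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
 *	desc.qw1 = 0;
 *	desc.qw2 = 0;
 *	desc.qw3 = 0;
 *	qi_submit_sync(&desc, iommu);
 */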

struct q_inval {
	raw_spinlock_t q_lock;
	void *desc; /* invalidation queue */
	int *desc_status; /* desc status */
	int free_head; /* first free entry */
	int free_tail; /* last free entry */
	int free_cnt;
};

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf

#define INTR_REMAP_TABLE_ENTRIES 65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64 lo;
	u64 hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
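
/*
 * Example (illustrative only) of how the layout comment above maps onto the
 * two halves: a present entry for domain 5, using legacy multi-level
 * translation with a 4-level (address width == 2) second-level table whose
 * physical address is @pgd_phys, would look roughly like:
 *
 *	context->lo = pgd_phys | (CONTEXT_TT_MULTI_LEVEL << 2) | 1;
 *	context->hi = (5 << 8) | 2;
 */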

struct dmar_domain {
	int nid; /* node id */

	unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16 iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices; /* all devices' list */
	struct iova_domain iovad; /* iova's that belong to this domain */

	struct dma_pte *pgd; /* virtual address */
	int gaw; /* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int agaw;

	int flags; /* flags to find out type of domain */

	int iommu_coherency; /* indicate coherency of iommu access */
	int iommu_snooping; /* indicate snooping control feature */
	int iommu_count; /* reference count of iommu */
	int iommu_superpage; /* Level of superpages supported:
				0 == 4KiB (no superpages), 1 == 2MiB,
				2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64 max_addr; /* maximum mapped address */

	struct iommu_domain domain; /* generic domain data structure for
				       iommu core */
};

struct intel_iommu {
	void __iomem *reg; /* Pointer to hardware regs, virtual addr */
	u64 reg_phys; /* physical address of hw register set */
	u64 reg_size; /* size of hw register set */
	u64 cap;
	u64 ecap;
	u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t register_lock; /* protect register handling */
	int seq_id; /* sequence id of the iommu */
	int agaw; /* agaw of this iommu */
	int msagaw; /* max sagaw of this iommu */
	unsigned int irq, pr_irq;
	u16 segment; /* PCI segment# */
	unsigned char name[13]; /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long *domain_ids; /* bitmap of domains */
	struct dmar_domain ***domains; /* ptr to domains */
	spinlock_t lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	struct page_req_dsc *prq;
	unsigned char prq_name[16]; /* Name for PRQ interrupt */
#endif
	struct q_inval *qi; /* Queued invalidation info */
	u32 *iommu_state; /* Store iommu states between suspend and resume. */

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table; /* Interrupt remapping info */
	struct irq_domain *ir_domain;
	struct irq_domain *ir_msi_domain;
#endif
	struct iommu_device iommu; /* IOMMU core code handle */
	int node;
	u32 flags; /* Software defined flags */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link; /* link to domain siblings */
	struct list_head global; /* link to global list */
	struct list_head table; /* link to pasid table */
	u8 bus; /* PCI bus number */
	u8 devfn; /* PCI devfn number */
	u16 pfsid; /* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
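
/*
 * Illustrative sketch: a leaf PTE mapping host physical page @paddr
 * read/write is just the page-aligned address OR'ed with the permission
 * bits defined near the top of this file:
 *
 *	pte->val = (paddr & VTD_PAGE_MASK) | DMA_PTE_READ | DMA_PTE_WRITE;
 *
 * dma_pte_present() above then reports true because bits 0-1 are non-zero,
 * and dma_pte_addr() recovers the page frame address.
 */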

extern struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			       u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_svm_init(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);

struct svm_dev_ops;

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct svm_dev_ops *ops;
	int users;
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	struct intel_iommu *iommu;
	int flags;
	int pasid;
	struct list_head devs;
	struct list_head list;
};

extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
#else
static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif

#endif