1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (c) 2006, Intel Corporation. |
4 | * |
5 | * Copyright (C) 2006-2008 Intel Corporation |
6 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
7 | */ |
8 | |
9 | #ifndef _IOVA_H_ |
10 | #define _IOVA_H_ |
11 | |
12 | #include <linux/types.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/rbtree.h> |
15 | #include <linux/dma-mapping.h> |
16 | |
17 | /* iova structure */ |
/*
 * One allocated IOVA range, tracked as an inclusive [pfn_lo, pfn_hi]
 * span of IOVA page-frame numbers (inclusive: see iova_size()'s "+ 1").
 */
struct iova {
	struct rb_node node;	/* rbtree linkage */
	unsigned long pfn_hi;	/* Highest allocated pfn */
	unsigned long pfn_lo;	/* Lowest allocated pfn */
};
23 | |
24 | |
25 | struct iova_rcache; |
26 | |
27 | /* holds all the iova translations for a domain */ |
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t iova_rbtree_lock;	/* Lock to protect update of rbtree */
	struct rb_root rbroot;		/* iova domain rbtree root */
	struct rb_node *cached_node;	/* Save last alloced node */
	struct rb_node *cached32_node;	/* Save last 32-bit alloced node */
	unsigned long granule;		/* pfn granularity for this domain;
					 * iova_shift() takes __ffs() of it, so
					 * presumably a power of two — TODO confirm */
	unsigned long start_pfn;	/* Lower limit for this domain */
	unsigned long dma_32bit_pfn;	/* NOTE(review): looks like the pfn boundary
					 * backing cached32_node/max32_alloc_size —
					 * confirm against the allocator */
	unsigned long max32_alloc_size; /* Size of last failed allocation */
	struct iova anchor;		/* rbtree lookup anchor */

	struct iova_rcache *rcaches;	/* opaque; struct iova_rcache is only
					 * forward-declared in this header */
	struct hlist_node cpuhp_dead;	/* NOTE(review): appears to be CPU-hotplug
					 * notifier linkage — confirm */
};
42 | |
43 | static inline unsigned long iova_size(struct iova *iova) |
44 | { |
45 | return iova->pfn_hi - iova->pfn_lo + 1; |
46 | } |
47 | |
48 | static inline unsigned long iova_shift(struct iova_domain *iovad) |
49 | { |
50 | return __ffs(iovad->granule); |
51 | } |
52 | |
53 | static inline unsigned long iova_mask(struct iova_domain *iovad) |
54 | { |
55 | return iovad->granule - 1; |
56 | } |
57 | |
58 | static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) |
59 | { |
60 | return iova & iova_mask(iovad); |
61 | } |
62 | |
63 | static inline size_t iova_align(struct iova_domain *iovad, size_t size) |
64 | { |
65 | return ALIGN(size, iovad->granule); |
66 | } |
67 | |
68 | static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) |
69 | { |
70 | return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); |
71 | } |
72 | |
73 | static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) |
74 | { |
75 | return iova >> iova_shift(iovad); |
76 | } |
77 | |
78 | #if IS_REACHABLE(CONFIG_IOMMU_IOVA) |
79 | int iova_cache_get(void); |
80 | void iova_cache_put(void); |
81 | |
82 | unsigned long iova_rcache_range(void); |
83 | |
84 | void free_iova(struct iova_domain *iovad, unsigned long pfn); |
85 | void __free_iova(struct iova_domain *iovad, struct iova *iova); |
86 | struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, |
87 | unsigned long limit_pfn, |
88 | bool size_aligned); |
89 | void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, |
90 | unsigned long size); |
91 | unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, |
92 | unsigned long limit_pfn, bool flush_rcache); |
93 | struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, |
94 | unsigned long pfn_hi); |
95 | void init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
96 | unsigned long start_pfn); |
97 | int iova_domain_init_rcaches(struct iova_domain *iovad); |
98 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); |
99 | void put_iova_domain(struct iova_domain *iovad); |
100 | #else |
/* Stub when CONFIG_IOMMU_IOVA is not reachable: report no support. */
static inline int iova_cache_get(void)
{
	/* NOTE(review): -ENOTSUPP is kernel-internal; must not reach userspace */
	return -ENOTSUPP;
}
105 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void iova_cache_put(void)
{
}
109 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
113 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}
117 | |
/* Stub when CONFIG_IOMMU_IOVA is not reachable: allocation always fails. */
static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}
125 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}
131 | |
/* Stub when CONFIG_IOMMU_IOVA is not reachable: 0 signals allocation failure. */
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}
139 | |
/* Stub when CONFIG_IOMMU_IOVA is not reachable: reservation always fails. */
static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}
146 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}
152 | |
/* Stub when CONFIG_IOMMU_IOVA is not reachable: lookup always misses. */
static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}
158 | |
/* No-op stub when CONFIG_IOMMU_IOVA is not reachable. */
static inline void put_iova_domain(struct iova_domain *iovad)
{
}
162 | |
163 | #endif |
164 | |
165 | #endif |
166 | |