// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/pci.h>
#include "pci.h"

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
	struct resource **res = ptr;

	pci_unmap_iospace(*res);
}

/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
 * detach.
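 *
 * A minimal usage sketch for a host bridge driver's I/O window, assuming
 * @res is an IORESOURCE_IO window taken from the bridge's resource list::
 *
 *	err = devm_pci_remap_iospace(dev, res, pci_pio_to_address(res->start));
 *	if (err)
 *		return err;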
 */
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
			   phys_addr_t phys_addr)
{
	const struct resource **ptr;
	int error;

	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = pci_remap_iospace(res, phys_addr);
	if (error) {
		devres_free(ptr);
	} else {
		*ptr = res;
		devres_add(dev, ptr);
	}

	return error;
}
EXPORT_SYMBOL(devm_pci_remap_iospace);

/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
 * detach.
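 *
 * A minimal usage sketch, assuming @res describes the config space window
 * to be mapped::
 *
 *	base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;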
 */
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
				      resource_size_t offset,
				      resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = pci_remap_cfgspace(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_pci_remap_cfgspace);

/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it with the pci_remap_cfgspace() API, which ensures
 * that the proper PCI configuration space memory attributes are used.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
				      res->name);
	else
		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);

/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (!dr)
		return -ENOMEM;

	dr->mwi = 1;
	return pci_set_mwi(dev);
}
EXPORT_SYMBOL(pcim_set_mwi);

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

/*
 * TODO: After the last four callers in pci.c are ported, find_pci_dr()
 * needs to be made static again.
 */
struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
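 *
 * A minimal usage sketch from a driver's probe path::
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;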
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
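 *
 * A minimal usage sketch (enable first, then pin)::
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pcim_pin_device(pdev);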
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
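 *
 * A minimal usage sketch, assuming BAR 0 has already been mapped with
 * pcim_iomap() or pcim_iomap_regions()::
 *
 *	void __iomem *regs = pcim_iomap_table(pdev)[0];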
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
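 *
 * A minimal usage sketch (a @maxlen of 0 maps the whole BAR)::
 *
 *	regs = pcim_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;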
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
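 *
 * A minimal usage sketch (the "mydrv" name is only illustrative)::
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "mydrv");
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];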
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

err_region:
	pci_release_region(pdev, i);
err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
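 *
 * A minimal usage sketch (the "mydrv" name is only illustrative)::
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "mydrv");
 *	if (rc)
 *		return rc;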
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
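 *
 * A minimal usage sketch, undoing a pcim_iomap_regions() call made with
 * the same mask::
 *
 *	pcim_iounmap_regions(pdev, BIT(0) | BIT(2));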
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);