1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | |
3 | #include <linux/io.h> |
4 | #include <linux/slab.h> |
5 | #include <linux/vmalloc.h> |
6 | |
7 | #include <mm/mmu_decl.h> |
8 | |
9 | void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size) |
10 | { |
11 | pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); |
12 | |
13 | return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); |
14 | } |
15 | EXPORT_SYMBOL(ioremap_wt); |
16 | |
/*
 * __ioremap_caller() - core 32-bit ioremap implementation.
 * @addr:	physical address to map; addresses below 16MB are treated as
 *		ISA memory space and rebased onto _ISA_MEM_BASE
 * @size:	length of the region in bytes
 * @prot:	page protection flags for the mapping
 * @caller:	call site of the original ioremap variant, for diagnostics
 *
 * Returns the virtual address of the mapping (including the sub-page
 * offset of @addr), or NULL on failure.  Before the slab allocator is
 * up, mappings are carved downward from ioremap_bot; afterwards the
 * generic vmalloc-based path is used.
 */
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
	unsigned long v;
	phys_addr_t p, offset;
	int err;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (addr < SZ_16M)
		addr += _ISA_MEM_BASE;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * A kdump kernel legitimately remaps the previous kernel's RAM,
	 * so the RAM check is compiled out for CONFIG_CRASH_DUMP.
	 */
#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
	    page_is_ram(__phys_to_pfn(p))) {
		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n" , __func__,
			(unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	/* Zero-length request, or PAGE_ALIGN(addr + size) wrapped. */
	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		return (void __iomem *)v + offset;

	/* Normal case: vmalloc space is available, use the generic path. */
	if (slab_is_available())
		return generic_ioremap_prot(addr, size, prot);

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */
	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n" , caller);

	/*
	 * Early path: carve the mapping downward from ioremap_bot,
	 * leaving one unmapped guard page between successive mappings.
	 * ioremap_bot is only moved once the range is mapped successfully.
	 */
	err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
	if (err)
		return NULL;
	ioremap_bot -= size + PAGE_SIZE;

	return (void __iomem *)ioremap_bot + offset;
}
80 | |
81 | void iounmap(volatile void __iomem *addr) |
82 | { |
83 | /* |
84 | * If mapped by BATs then there is nothing to do. |
85 | * Calling vfree() generates a benign warning. |
86 | */ |
87 | if (v_block_mapped((unsigned long)addr)) |
88 | return; |
89 | |
90 | generic_iounmap(addr); |
91 | } |
92 | EXPORT_SYMBOL(iounmap); |
93 | |