// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  The old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK		0x03ffe000
#define IOMMU_CACHE_INHIBIT	0x00000040
#define IOMMU_FULL_BLOCK	0x00000020
#define IOMMU_MODIFIED		0x00000010
#define IOMMU_USED		0x00000008
#define IOMMU_WRITE_PROTECT	0x00000004
#define IOMMU_DT_MASK		0x00000003
#define IOMMU_DT_INVALID	0x00000000
#define IOMMU_DT_VALID		0x00000001
#define IOMMU_DT_BAD		0x00000002
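/*
 * The IOMMU page table: one descriptor word per DVMA page.  Per the
 * masks above, bits 25..13 of a descriptor hold the physical page
 * address; the remaining bits are cache-inhibit, full-block,
 * modified/used and write-protect flags plus the two-bit descriptor
 * type (invalid, valid or bad).
 */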
static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;

#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) |	\
					 (paddr & (DVMA_PAGE_SIZE-1)))
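/* The disabled variant below would also mark DVMA pages cache-inhibited. */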
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] =		\
					 (addr & IOMMU_ADDR_MASK) |	\
					 IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] =		\
					 (addr & IOMMU_ADDR_MASK) |	\
					 IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^	\
					 ((addr & 0x03c00000) >>	\
					  (DVMA_PAGE_SHIFT+4)))
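/*
 * Note: dvma_entry_hash is not referenced anywhere in this file;
 * presumably it is a leftover of the original allocator mentioned in
 * the header comment.
 */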
#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
static void dvma_print (unsigned long dvma_addr)
{
	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif


/*
 * Create a virtual mapping for a page assigned within the IOMMU, so
 * that the CPU can reach it easily.
 */
inline int dvma_map_cpu(unsigned long kaddr,
			unsigned long vaddr, int len)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
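	/*
	 * Walk, and allocate where missing, the pmd and pte tables,
	 * clamping each inner loop to the end of the current pgd/pmd
	 * region so pte++ never runs off a pte table, and install one
	 * pte per page.
	 */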
	do {
		pmd_t *pmd;
		unsigned long end2;

		if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while(vaddr < end3);

		} while(vaddr < end2);

	} while(vaddr < end);

	flush_tlb_all();

out:
	return ret;
}


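/*
 * Point the IOMMU descriptors for the bus range starting at baddr at
 * the physical pages behind kaddr, one DVMA page at a time.
 */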
int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	if(len & ~DVMA_PAGE_MASK)
		end++;

	for(; index < end ; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;
//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;
}
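/*
 * Tear down the IOMMU mappings covering [baddr, baddr + len) by
 * marking each descriptor invalid.
 */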
void dvma_unmap_iommu(unsigned long baddr, int len)
{
	int index, end;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

	for(; index < end ; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}
}