/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: Driver callback that writes one I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	__le64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

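			/* Sanity check: the previous stream, if any, must
			 * have been filled out to exactly the length the
			 * first pass recorded in sg_dma_len(). */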
			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
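			/* The in-page offset was stashed in the low bits of
			 * the pide by the first pass; recover it here. */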
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
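			/* Prefetch for write: the fill loop below stores
			 * into this pdir line. */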
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
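		/* Write one pdir entry per IOVP_SIZE page spanned by this
		 * chunk (plus the leading dma_offset, if any). */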
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while(unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
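
/*
** For example, three page-aligned, virtually contiguous 4k buffers
** collapse into one DMA stream: a single dma_address/dma_len pair
** covering 12k, later backed by three I/O Pdir entries (assuming
** IOVP_SIZE is 4k).
*/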

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		      struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);
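	/* The range allocators hand out naturally aligned blocks, so a
	 * stream no longer than the boundary spacing cannot straddle a
	 * segment boundary. */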

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;
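		/* The in-page offset rides along in the low bits of the
		 * final dma_address (see the PIDE encoding below). */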

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while(--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
				   startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			** Next see if we can append the next chunk: the
			** previous chunk must end exactly where this one
			** begins, and that shared address must fall on a
			** page boundary.
			*/
			if (unlikely((prev_end != sg_start) ||
				     ((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream: everything from contig_sg through
		** startsg is one virtually contiguous run.
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
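		/* That dma_address encodes the PIDE_FLAG marker, the
		 * allocated pdir index, and the in-page offset; the second
		 * pass (iommu_fill_pdir) decodes it and writes the real
		 * DMA address back. */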
		n_mappings++;
	}

	return n_mappings;
}
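

/*
** Illustrative usage (a sketch, not part of this header): a driver's
** ->map_sg() runs the two passes back to back while holding its
** resource lock.  The callback names below are the SBA driver's;
** ccio-dma passes its own equivalents.
**
**	spin_lock_irqsave(&ioc->res_lock, flags);
**	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
**					  sba_alloc_range);
**	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
**	spin_unlock_irqrestore(&ioc->res_lock, flags);
**
** Both passes walk the same list, so the stream counts they return
** should agree.
*/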